VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 56295

Last change on this file since 56295 was 56287, checked in by vboxsync, 9 years ago

VMM: Updated (C) year.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 262.0 KB
1/* $Id: PATM.cpp 56287 2015-06-09 11:15:22Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2015 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/cpum.h>
29#include <VBox/vmm/cpumdis.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/mm.h>
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/hm.h>
34#include <VBox/vmm/ssm.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/cfgm.h>
37#include <VBox/param.h>
38#include <VBox/vmm/selm.h>
39#include <VBox/vmm/csam.h>
40#include <iprt/avl.h>
41#include "PATMInternal.h"
42#include "PATMPatch.h"
43#include <VBox/vmm/vm.h>
44#include <VBox/vmm/uvm.h>
45#include <VBox/dbg.h>
46#include <VBox/err.h>
47#include <VBox/log.h>
48#include <iprt/assert.h>
49#include <iprt/asm.h>
50#include <VBox/dis.h>
51#include <VBox/disopcode.h>
52#include "internal/pgm.h"
53
54#include <iprt/string.h>
55#include "PATMA.h"
56
57//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
58//#define PATM_DISABLE_ALL
59
60/**
61 * Refresh trampoline patch state.
62 */
63typedef struct PATMREFRESHPATCH
64{
65 /** Pointer to the VM structure. */
66 PVM pVM;
67 /** The trampoline patch record. */
68 PPATCHINFO pPatchTrampoline;
69 /** The new patch we want to jump to. */
70 PPATCHINFO pPatchRec;
71} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
72
73
74#define PATMREAD_RAWCODE 1 /* read code as-is */
75#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
76#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
77
78/*
79 * Private structure used during disassembly
80 */
81typedef struct
82{
83 PVM pVM;
84 PPATCHINFO pPatchInfo;
85 R3PTRTYPE(uint8_t *) pbInstrHC;
86 RTRCPTR pInstrGC;
87 uint32_t fReadFlags;
88} PATMDISASM, *PPATMDISASM;
89
90
91/*******************************************************************************
92* Internal Functions *
93*******************************************************************************/
94static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
95static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
96static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
97
98#ifdef LOG_ENABLED // keep gcc quiet
99static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
100#endif
101#ifdef VBOX_WITH_STATISTICS
102static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
103static void patmResetStat(PVM pVM, void *pvSample);
104static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
105#endif
106
107#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
108#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
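/* Editor's sketch (not part of the original file): the two macros above just rebase a
 * pointer between the ring-3 (HC) and raw-mode (GC) mappings of the same patch memory
 * block. With plain integer types standing in for the VBox ones, the arithmetic is: */
static inline uint32_t patmSketchHCPtr2GCPtr(uintptr_t uPtrHC, uintptr_t uPatchMemHC, uint32_t uPatchMemGC)
{
    uintptr_t off = uPtrHC - uPatchMemHC;   /* offset within the patch memory block */
    return uPatchMemGC + (uint32_t)off;     /* same offset, applied to the GC base */
}
static inline uintptr_t patmSketchGCPtr2HCPtr(uint32_t uPtrGC, uintptr_t uPatchMemHC, uint32_t uPatchMemGC)
{
    return uPatchMemHC + (uPtrGC - uPatchMemGC);
}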
109
110static int patmReinit(PVM pVM);
111static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
112static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
113static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
114
115#ifdef VBOX_WITH_DEBUGGER
116static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
117static FNDBGCCMD patmr3CmdOn;
118static FNDBGCCMD patmr3CmdOff;
119
120/** Command descriptors. */
121static const DBGCCMD g_aCmds[] =
122{
123 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
124 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
125 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
126};
127#endif
128
129/* Don't want to break saved states, so put it here as a global variable. */
130static unsigned int cIDTHandlersDisabled = 0;
131
132/**
133 * Initializes the PATM.
134 *
135 * @returns VBox status code.
136 * @param pVM Pointer to the VM.
137 */
138VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
139{
140 int rc;
141
142 /*
143 * We only need a saved state dummy loader if HM is enabled.
144 */
145 if (HMIsEnabled(pVM))
146 {
147 pVM->fPATMEnabled = false;
148 return SSMR3RegisterStub(pVM, "PATM", 0);
149 }
150
151 /*
152 * Raw-mode.
153 */
154 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
155
156 /* These values can't change as they are hardcoded in patch code (old saved states!) */
157 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
158 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
159 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
160 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
161
162 AssertReleaseMsg(g_fPatmInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
163 ("Interrupt flags out of sync!! g_fPatmInterruptFlag=%#x expected %#x. broken assembler?\n", g_fPatmInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
164
165 /* Allocate patch memory and GC patch state memory. */
166 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
167 /* Add another page in case the generated code is much larger than expected. */
168 /** @todo bad safety precaution */
169 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
170 if (RT_FAILURE(rc))
171 {
172 Log(("MMHyperAlloc failed with %Rrc\n", rc));
173 return rc;
174 }
175 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
176
177 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address.) */
178 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
179 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
180
181 patmR3DbgInit(pVM);
182
183 /*
184 * Hypervisor memory for GC status data (read/write)
185 *
186 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
187 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
188 *
189 */
190 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
191 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
192 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
193
194 /* Hypervisor memory for patch statistics */
195 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
196 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
197
198 /* Memory for patch lookup trees. */
199 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
200 AssertRCReturn(rc, rc);
201 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
202
203#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
204 /* Check CFGM option. */
205 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
206 if (RT_FAILURE(rc))
207# ifdef PATM_DISABLE_ALL
208 pVM->fPATMEnabled = false;
209# else
210 pVM->fPATMEnabled = true;
211# endif
212#endif
213
214 rc = patmReinit(pVM);
215 AssertRC(rc);
216 if (RT_FAILURE(rc))
217 return rc;
218
219 /*
220 * Register the virtual page access handler type.
221 */
222 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_ALL, false /*fRelocUserRC*/,
223 NULL /*pfnInvalidateR3*/,
224 patmVirtPageHandler,
225 "patmVirtPageHandler", "patmRCVirtPagePfHandler",
226 "PATMMonitorPatchJump", &pVM->patm.s.hMonitorPageType);
227 AssertRCReturn(rc, rc);
228
229 /*
230 * Register save and load state notifiers.
231 */
232 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SAVED_STATE_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
233 NULL, NULL, NULL,
234 NULL, patmR3Save, NULL,
235 NULL, patmR3Load, NULL);
236 AssertRCReturn(rc, rc);
237
238#ifdef VBOX_WITH_DEBUGGER
239 /*
240 * Debugger commands.
241 */
242 static bool s_fRegisteredCmds = false;
243 if (!s_fRegisteredCmds)
244 {
245 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
246 if (RT_SUCCESS(rc2))
247 s_fRegisteredCmds = true;
248 }
249#endif
250
251#ifdef VBOX_WITH_STATISTICS
252 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
253 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
254 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
255 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
256 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
257 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
258 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
259 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
260
261 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
262 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
263
264 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
265 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
266 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
267
268 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
269 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
270 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
271 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
272 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
273
274 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
275 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
276
277 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
278 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
279
280 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
281 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
282 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
283
284 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
285 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
286 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
287
288 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
289 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
290
291 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
292 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
293 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
294 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
295
296 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
297 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
298
299 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
300 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
301
302 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
303 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
304 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
305
306 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
307 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
308 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
309 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
310
311 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
312 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
313 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
314 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
315 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
316
317 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
318#endif /* VBOX_WITH_STATISTICS */
319
320 Log(("g_patmCallRecord.cbFunction %u\n", g_patmCallRecord.cbFunction));
321 Log(("g_patmCallIndirectRecord.cbFunction %u\n", g_patmCallIndirectRecord.cbFunction));
322 Log(("g_patmRetRecord.cbFunction %u\n", g_patmRetRecord.cbFunction));
323 Log(("g_patmJumpIndirectRecord.cbFunction %u\n", g_patmJumpIndirectRecord.cbFunction));
324 Log(("g_patmPopf32Record.cbFunction %u\n", g_patmPopf32Record.cbFunction));
325 Log(("g_patmIretRecord.cbFunction %u\n", g_patmIretRecord.cbFunction));
326 Log(("g_patmStiRecord.cbFunction %u\n", g_patmStiRecord.cbFunction));
327 Log(("g_patmCheckIFRecord.cbFunction %u\n", g_patmCheckIFRecord.cbFunction));
328
329 return rc;
330}
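/* Editor's sketch (not part of the original file): PATMR3Init carves the single
 * MMR3HyperAllocOnceNoRel() allocation above into consecutive sub-blocks. Shown with
 * hypothetical size parameters instead of PATCH_MEMORY_SIZE/PAGE_SIZE/PATM_STACK_TOTAL_SIZE: */
static void patmSketchMemLayout(uint8_t *pbBase, size_t cbPatchMem, size_t cbPage, size_t cbStack)
{
    uint8_t *pbPatchMem = pbBase;                    /* recompiled patch code */
    uint8_t *pbSlack    = pbPatchMem + cbPatchMem;   /* one spare page for oversized generated code */
    uint8_t *pbStack    = pbSlack    + cbPage;       /* private stack + saved return addresses */
    uint8_t *pbGCState  = pbStack    + cbStack;      /* PATMGCSTATE page shared with raw-mode code */
    uint8_t *pbStats    = pbGCState  + cbPage;       /* patch statistics (PATM_STAT_MEMSIZE bytes) */
    NOREF(pbStats);                                  /* layout illustration only */
}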
331
332/**
333 * Finalizes HMA page attributes.
334 *
335 * @returns VBox status code.
336 * @param pVM Pointer to the VM.
337 */
338VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
339{
340 if (HMIsEnabled(pVM))
341 return VINF_SUCCESS;
342
343 /*
344 * The GC state, stack and statistics must be read/write for the guest
345 * (supervisor only of course).
346 *
347 * Remember, we run guest code at ring-1 and ring-2 levels, which are
348 * considered supervisor levels by the paging structures. We run the VMM
349 * in ring-0 with CR0.WP=0, mapping all VMM structures as read-only
350 * pages. The following structures are exceptions and must be mapped with
351 * write access so the ring-1 and ring-2 code can modify them.
352 */
353 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
354 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCState accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
355
356 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
357 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCStack accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
358
359 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
360 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the stats struct accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
361
362 /*
363 * Find the patch helper segment so we can identify code running there as patch code.
364 */
365 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpBegin", &pVM->patm.s.pbPatchHelpersRC);
366 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpBegin: %Rrc\n", rc), rc);
367 pVM->patm.s.pbPatchHelpersR3 = (uint8_t *)MMHyperRCToR3(pVM, pVM->patm.s.pbPatchHelpersRC);
368 AssertLogRelReturn(pVM->patm.s.pbPatchHelpersR3 != NULL, VERR_INTERNAL_ERROR_3);
369
370 RTRCPTR RCPtrEnd;
371 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpEnd", &RCPtrEnd);
372 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpEnd: %Rrc\n", rc), rc);
373
374 pVM->patm.s.cbPatchHelpers = RCPtrEnd - pVM->patm.s.pbPatchHelpersRC;
375 AssertLogRelMsgReturn(pVM->patm.s.cbPatchHelpers < _128K,
376 ("%RRv-%RRv => %#x\n", pVM->patm.s.pbPatchHelpersRC, RCPtrEnd, pVM->patm.s.cbPatchHelpers),
377 VERR_INTERNAL_ERROR_4);
378
379
380 return VINF_SUCCESS;
381}
382
383/**
384 * (Re)initializes PATM
385 *
386 * @param pVM The VM.
387 */
388static int patmReinit(PVM pVM)
389{
390 int rc;
391
392 /*
393 * Assert alignment and sizes.
394 */
395 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
396 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
397
398 /*
399 * Setup any fixed pointers and offsets.
400 */
401 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
402
403#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
404#ifndef PATM_DISABLE_ALL
405 pVM->fPATMEnabled = true;
406#endif
407#endif
408
409 Assert(pVM->patm.s.pGCStateHC);
410 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
411 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
412
413 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
414 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
415
416 Assert(pVM->patm.s.pGCStackHC);
417 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
418 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
419 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
420 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
421
422 Assert(pVM->patm.s.pStatsHC);
423 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
424 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
425
426 Assert(pVM->patm.s.pPatchMemHC);
427 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
428 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
429 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
430
431 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
432 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
433
434 Assert(pVM->patm.s.PatchLookupTreeHC);
435 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
436
437 /*
438 * (Re)Initialize PATM structure
439 */
440 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
441 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
442 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
443 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
444 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
445 pVM->patm.s.pvFaultMonitor = 0;
446 pVM->patm.s.deltaReloc = 0;
447
448 /* Lowest and highest patched instruction */
449 pVM->patm.s.pPatchedInstrGCLowest = ~0;
450 pVM->patm.s.pPatchedInstrGCHighest = 0;
451
452 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
453 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
454 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
455
456 pVM->patm.s.pfnSysEnterPatchGC = 0;
457 pVM->patm.s.pfnSysEnterGC = 0;
458
459 pVM->patm.s.fOutOfMemory = false;
460
461 pVM->patm.s.pfnHelperCallGC = 0;
462 patmR3DbgReset(pVM);
463
464 /* Generate all global functions to be used by future patches. */
465 /* We generate a fake patch in order to use the existing code for relocation. */
466 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
467 if (RT_FAILURE(rc))
468 {
469 Log(("Out of memory!!!!\n"));
470 return VERR_NO_MEMORY;
471 }
472 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
473 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
474 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
475
476 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
477 AssertRC(rc);
478
479 /* Update free pointer in patch memory. */
480 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
481 /* Round to next 8 byte boundary. */
482 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
483
484
485 return rc;
486}
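/* Editor's sketch (not part of the original file): RT_ALIGN_32 used above rounds the
 * patch memory write offset up to the next 8-byte boundary. The underlying
 * power-of-two align-up idiom is: */
static inline uint32_t patmSketchAlignUp32(uint32_t off, uint32_t cbAlign /* power of two, e.g. 8 */)
{
    return (off + cbAlign - 1) & ~(cbAlign - 1);
}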
487
488
489/**
490 * Applies relocations to data and code managed by this
491 * component. This function will be called at init and
492 * whenever the VMM needs to relocate itself inside the GC.
493 *
494 * The PATM will update the addresses used by the switcher.
495 *
496 * @param pVM The VM.
497 * @param offDelta The relocation delta.
498 */
499VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM, RTRCINTPTR offDelta)
500{
501 if (HMIsEnabled(pVM))
502 return;
503
504 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
505 Assert((RTRCINTPTR)(GCPtrNew - pVM->patm.s.pGCStateGC) == offDelta);
506
507 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, offDelta));
508 if (offDelta)
509 {
510 PCPUMCTX pCtx;
511
512 /* Update CPUMCTX guest context pointer. */
513 pVM->patm.s.pCPUMCtxGC += offDelta;
514
515 pVM->patm.s.deltaReloc = offDelta;
516 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmR3RelocatePatches, (void *)pVM);
517
518 pVM->patm.s.pGCStateGC = GCPtrNew;
519 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
520 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
521 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
522 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
523
524 if (pVM->patm.s.pfnSysEnterPatchGC)
525 pVM->patm.s.pfnSysEnterPatchGC += offDelta;
526
527 /* If we are running patch code right now, then also adjust EIP. */
528 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
529 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
530 pCtx->eip += offDelta;
531
532 /* Deal with the global patch functions. */
533 pVM->patm.s.pfnHelperCallGC += offDelta;
534 pVM->patm.s.pfnHelperRetGC += offDelta;
535 pVM->patm.s.pfnHelperIretGC += offDelta;
536 pVM->patm.s.pfnHelperJumpGC += offDelta;
537
538 pVM->patm.s.pbPatchHelpersRC += offDelta;
539
540 patmR3RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
541 }
542}
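/* Editor's sketch (not part of the original file): relocation above is plain pointer
 * rebasing; every cached raw-mode (RC) address is shifted by the same signed delta.
 * A hypothetical helper for a single pointer would be: */
static inline RTRCPTR patmSketchRelocateRCPtr(RTRCPTR uPtrRC, RTRCINTPTR offDelta)
{
    /* Only valid for non-NIL pointers into the relocated area; callers must skip
     * unset pointers, as done for pfnSysEnterPatchGC above. */
    return (RTRCPTR)((RTRCUINTPTR)uPtrRC + (RTRCUINTPTR)offDelta);
}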
543
544
545/**
546 * Terminates the PATM.
547 *
548 * Termination means cleaning up and freeing all resources;
549 * the VM itself is at this point powered off or suspended.
550 *
551 * @returns VBox status code.
552 * @param pVM Pointer to the VM.
553 */
554VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
555{
556 if (HMIsEnabled(pVM))
557 return VINF_SUCCESS;
558
559 patmR3DbgTerm(pVM);
560
561 /* Memory was all allocated from the two MM heaps and requires no freeing. */
562 return VINF_SUCCESS;
563}
564
565
566/**
567 * PATM reset callback.
568 *
569 * @returns VBox status code.
570 * @param pVM The VM which is reset.
571 */
572VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
573{
574 Log(("PATMR3Reset\n"));
575 if (HMIsEnabled(pVM))
576 return VINF_SUCCESS;
577
578 /* Free all patches. */
579 for (;;)
580 {
581 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
582 if (pPatchRec)
583 patmR3RemovePatch(pVM, pPatchRec, true);
584 else
585 break;
586 }
587 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
588 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
589 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
590 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
591
592 int rc = patmReinit(pVM);
593 if (RT_SUCCESS(rc))
594 rc = PATMR3InitFinalize(pVM); /* paranoia */
595
596 return rc;
597}
598
599/**
600 * @callback_method_impl{FNDISREADBYTES}
601 */
602static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
603{
604 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
605
606/** @todo change this to read more! */
607 /*
608 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
609 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
610 */
611 /** @todo could change in the future! */
612 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
613 {
614 size_t cbRead = cbMaxRead;
615 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
616 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
617 if (RT_SUCCESS(rc))
618 {
619 if (cbRead >= cbMinRead)
620 {
621 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
622 return VINF_SUCCESS;
623 }
624
625 cbMinRead -= (uint8_t)cbRead;
626 cbMaxRead -= (uint8_t)cbRead;
627 offInstr += (uint8_t)cbRead;
628 uSrcAddr += cbRead;
629 }
630
631#ifdef VBOX_STRICT
632 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
633 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
634 {
635 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
636 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
637 }
638#endif
639 }
640
641 int rc = VINF_SUCCESS;
642 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
643 if ( !pDisInfo->pbInstrHC
644 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
645 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
646 {
647 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
648 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
649 offInstr += cbMinRead;
650 }
651 else
652 {
653 /*
654 * pbInstrHC is the base address; adjust according to the GC pointer.
655 *
656 * Try to read the maximum number of bytes here. Since the disassembler only
657 * ever uses these bytes for the current instruction, it doesn't matter
658 * much if we accidentally read the start of the next instruction even
659 * if it happens to be a patch jump or int3.
660 */
661 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
662 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
663
664 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
665 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
666 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
667 if (cbToRead > cbMaxRead)
668 cbToRead = cbMaxRead;
669
670 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
671 offInstr += (uint8_t)cbToRead;
672 }
673
674 pDis->cbCachedInstr = offInstr;
675 return rc;
676}
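/* Editor's sketch (not part of the original file): the reader above copies from a host
 * mapping of a single guest page, so it clamps the copy length to the bytes left in
 * that page (the PAGE_SIZE - (addr & PAGE_OFFSET_MASK) expressions). In isolation,
 * with a hypothetical 4 KiB page: */
static inline size_t patmSketchBytesLeftInPage(uintptr_t uAddr)
{
    const size_t cbPage = 4096;                /* stand-in for PAGE_SIZE */
    return cbPage - (uAddr & (cbPage - 1));    /* 1..4096 bytes until the next page boundary */
}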
677
678
679DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
680 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
681{
682 PATMDISASM disinfo;
683 disinfo.pVM = pVM;
684 disinfo.pPatchInfo = pPatch;
685 disinfo.pbInstrHC = pbInstrHC;
686 disinfo.pInstrGC = InstrGCPtr32;
687 disinfo.fReadFlags = fReadFlags;
688 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
689 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
690 patmReadBytes, &disinfo,
691 pCpu, pcbInstr, pszOutput, cbOutput));
692}
693
694
695DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
696 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
697{
698 PATMDISASM disinfo;
699 disinfo.pVM = pVM;
700 disinfo.pPatchInfo = pPatch;
701 disinfo.pbInstrHC = pbInstrHC;
702 disinfo.pInstrGC = InstrGCPtr32;
703 disinfo.fReadFlags = fReadFlags;
704 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
705 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
706 patmReadBytes, &disinfo,
707 pCpu, pcbInstr));
708}
709
710
711DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
712 uint32_t fReadFlags,
713 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
714{
715 PATMDISASM disinfo;
716 disinfo.pVM = pVM;
717 disinfo.pPatchInfo = pPatch;
718 disinfo.pbInstrHC = pbInstrHC;
719 disinfo.pInstrGC = InstrGCPtr32;
720 disinfo.fReadFlags = fReadFlags;
721 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
722 pCpu, pcbInstr));
723}
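/* Editor's sketch (not part of the original file): a typical use of the wrapper above,
 * disassembling one guest instruction into text and logging it. Variable names are
 * hypothetical; PATMREAD_ORGCODE makes the reader return the original guest bytes even
 * where a patch jump has been written over them. */
static void patmSketchDisasmAndLog(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 GCPtrInstr)
{
    DISCPUSTATE Cpu;
    uint32_t    cbInstr = 0;
    char        szOutput[128];
    if (patmR3DisInstrToStr(pVM, pPatch, GCPtrInstr, NULL /* let the reader map the page */,
                            PATMREAD_ORGCODE, &Cpu, &cbInstr, szOutput, sizeof(szOutput)))
        Log(("guest instruction at %RRv: %s", GCPtrInstr, szOutput));
}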
724
725#ifdef LOG_ENABLED
726# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
727 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
728# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
729 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
730
731# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
732 do { \
733 if (LogIsEnabled()) \
734 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
735 } while (0)
736
737static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
738 const char *pszComment1, const char *pszComment2)
739{
740 DISCPUSTATE DisState;
741 char szOutput[128];
742 szOutput[0] = '\0';
743 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
744 &DisState, NULL, szOutput, sizeof(szOutput));
745 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
746}
747
748#else
749# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
750# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
751# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
752#endif
753
754
755/**
756 * Callback function for RTAvloU32DoWithAll
757 *
758 * Updates all fixups in the patches
759 *
760 * @returns VBox status code.
761 * @param pNode Current node
762 * @param pParam Pointer to the VM.
763 */
764static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
765{
766 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
767 PVM pVM = (PVM)pParam;
768 RTRCINTPTR delta;
769 int rc;
770
771 /* Nothing to do if the patch is not active. */
772 if (pPatch->patch.uState == PATCH_REFUSED)
773 return 0;
774
775 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
776 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
777
778 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
779 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
780
781 /*
782 * Apply fixups.
783 */
784 AVLPVKEY key = NULL;
785 for (;;)
786 {
787 /* Get the record that's closest from above (after or equal to key). */
788 PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
789 if (!pRec)
790 break;
791
792 key = (uint8_t *)pRec->Core.Key + 1; /* search for the next record during the next round. */
793
794 switch (pRec->uType)
795 {
796 case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
797 Assert(pRec->pDest == pRec->pSource); Assert(PATM_IS_ASMFIX(pRec->pSource));
798 Log(("Absolute patch template fixup type %#x at %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
799 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
800 break;
801
802 case FIXUP_ABSOLUTE:
803 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
804 if ( !pRec->pSource
805 || PATMIsPatchGCAddr(pVM, pRec->pSource))
806 {
807 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
808 }
809 else
810 {
811 uint8_t curInstr[15];
812 uint8_t oldInstr[15];
813 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
814
815 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
816
817 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
818 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
819
820 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
821 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
822
823 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
824
825 if ( rc == VERR_PAGE_NOT_PRESENT
826 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
827 {
828 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
829
830 Log(("PATM: Patch page not present -> check later!\n"));
831 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
832 pPage,
833 pPage + (PAGE_SIZE - 1) /* inclusive! */,
834 (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
835 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
836 }
837 else
838 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
839 {
840 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
841 /*
842 * Disable patch; this is not a good solution
843 */
844 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
845 pPatch->patch.uState = PATCH_DISABLED;
846 }
847 else
848 if (RT_SUCCESS(rc))
849 {
850 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
851 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
852 AssertRC(rc);
853 }
854 }
855 break;
856
857 case FIXUP_REL_JMPTOPATCH:
858 {
859 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
860
861 if ( pPatch->patch.uState == PATCH_ENABLED
862 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
863 {
864 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
865 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
866 RTRCPTR pJumpOffGC;
867 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
868 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
869
870#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
871 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
872#else
873 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
874#endif
875
876 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
877#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
878 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
879 {
880 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
881
882 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
883 oldJump[0] = pPatch->patch.aPrivInstr[0];
884 oldJump[1] = pPatch->patch.aPrivInstr[1];
885 *(RTRCUINTPTR *)&oldJump[2] = displOld;
886 }
887 else
888#endif
889 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
890 {
891 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
892 oldJump[0] = 0xE9;
893 *(RTRCUINTPTR *)&oldJump[1] = displOld;
894 }
895 else
896 {
897 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
898 continue; //this should never happen!!
899 }
900 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
901
902 /*
903 * Read old patch jump and compare it to the one we previously installed
904 */
905 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
906 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
907
908 if ( rc == VERR_PAGE_NOT_PRESENT
909 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
910 {
911 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
912 Log(("PATM: Patch page not present -> check later!\n"));
913 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
914 pPage,
915 pPage + (PAGE_SIZE - 1) /* inclusive! */,
916 (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
917 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
918 }
919 else
920 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
921 {
922 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
923 /*
924 * Disable patch; this is not a good solution
925 */
926 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
927 pPatch->patch.uState = PATCH_DISABLED;
928 }
929 else
930 if (RT_SUCCESS(rc))
931 {
932 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
933 AssertRC(rc);
934 }
935 else
936 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
937 }
938 else
939 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
940
941 pRec->pDest = pTarget;
942 break;
943 }
944
945 case FIXUP_REL_JMPTOGUEST:
946 {
947 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
948 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
949
950 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
951 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
952 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
953 pRec->pSource = pSource;
954 break;
955 }
956
957 case FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL:
958 case FIXUP_CONSTANT_IN_PATCH_ASM_TMPL:
959 /* Only applicable when loading state. */
960 Assert(pRec->pDest == pRec->pSource);
961 Assert(PATM_IS_ASMFIX(pRec->pSource));
962 break;
963
964 default:
965 AssertMsg(0, ("Invalid fixup type!!\n"));
966 return VERR_INVALID_PARAMETER;
967 }
968 }
969
970 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
971 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
972 return 0;
973}
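/* Editor's sketch (not part of the original file): the fixup loop above enumerates an
 * AVL tree without a dedicated iterator by repeatedly fetching the best fit at or
 * above a moving key. Reduced to the bare pattern: */
static void patmSketchAvlWalk(PAVLPVNODECORE *ppTree)
{
    AVLPVKEY key = NULL;
    for (;;)
    {
        PAVLPVNODECORE pNode = RTAvlPVGetBestFit(ppTree, key, true /* fAbove: key or higher */);
        if (!pNode)
            break;
        key = (uint8_t *)pNode->Key + 1;   /* continue strictly after this node next round */
        /* ... process pNode here ... */
    }
}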
974
975#ifdef VBOX_WITH_DEBUGGER
976
977/**
978 * Callback function for RTAvloU32DoWithAll
979 *
980 * Enables the patch that's being enumerated
981 *
982 * @returns 0 (continue enumeration).
983 * @param pNode Current node
984 * @param pVM Pointer to the VM.
985 */
986static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
987{
988 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
989
990 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
991 return 0;
992}
993
994
995/**
996 * Callback function for RTAvloU32DoWithAll
997 *
998 * Disables the patch that's being enumerated
999 *
1000 * @returns 0 (continue enumeration).
1001 * @param pNode Current node
1002 * @param pVM Pointer to the VM.
1003 */
1004static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
1005{
1006 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1007
1008 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
1009 return 0;
1010}
1011
1012#endif /* VBOX_WITH_DEBUGGER */
1013
1014/**
1015 * Returns the host context pointer of the GC context structure
1016 *
1017 * @returns VBox status code.
1018 * @param pVM Pointer to the VM.
1019 */
1020VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1021{
1022 AssertReturn(!HMIsEnabled(pVM), NULL);
1023 return pVM->patm.s.pGCStateHC;
1024}
1025
1026
1027/**
1028 * Allows or disallows patching of privileged instructions executed by the guest OS.
1029 *
1030 * @returns VBox status code.
1031 * @param pUVM The user mode VM handle.
1032 * @param fAllowPatching Allow/disallow patching
1033 */
1034VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1035{
1036 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1037 PVM pVM = pUVM->pVM;
1038 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1039
1040 if (!HMIsEnabled(pVM))
1041 pVM->fPATMEnabled = fAllowPatching;
1042 else
1043 Assert(!pVM->fPATMEnabled);
1044 return VINF_SUCCESS;
1045}
1046
1047
1048/**
1049 * Checks if the patch manager is enabled or not.
1050 *
1051 * @returns true if enabled, false if not (or if invalid handle).
1052 * @param pUVM The user mode VM handle.
1053 */
1054VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1055{
1056 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1057 PVM pVM = pUVM->pVM;
1058 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1059 return PATMIsEnabled(pVM);
1060}
1061
1062
1063/**
1064 * Convert a GC patch block pointer to a HC patch pointer
1065 *
1066 * @returns HC pointer or NULL if it's not a GC patch pointer
1067 * @param pVM Pointer to the VM.
1068 * @param pAddrGC GC pointer
1069 */
1070VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1071{
1072 AssertReturn(!HMIsEnabled(pVM), NULL);
1073 RTRCUINTPTR offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1074 if (offPatch >= pVM->patm.s.cbPatchMem)
1075 {
1076 offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC;
1077 if (offPatch >= pVM->patm.s.cbPatchHelpers)
1078 return NULL;
1079 return pVM->patm.s.pbPatchHelpersR3 + offPatch;
1080 }
1081 return pVM->patm.s.pPatchMemHC + offPatch;
1082}
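/* Editor's sketch (not part of the original file): the range checks above rely on
 * unsigned wrap-around, so one comparison rejects addresses both below the block base
 * and past its end: */
static inline bool patmSketchAddrInRange(uint32_t uAddr, uint32_t uBase, uint32_t cb)
{
    uint32_t off = uAddr - uBase;   /* wraps to a huge value when uAddr < uBase */
    return off < cb;
}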
1083
1084
1085/**
1086 * Convert guest context address to host context pointer
1087 *
1088 * @returns Host context pointer or NULL in case of an error.
1089 *
1090 * @param pVM Pointer to the VM.
1091 * @param pCacheRec Address conversion cache record
1092 * @param pGCPtr Guest context pointer
1093 *
1094 *
1095 */
1096R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1097{
1098 int rc;
1099 R3PTRTYPE(uint8_t *) pHCPtr;
1100 uint32_t offset;
1101
1102 offset = (RTRCUINTPTR)pGCPtr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1103 if (offset < pVM->patm.s.cbPatchMem)
1104 {
1105#ifdef VBOX_STRICT
1106 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1107 Assert(pPatch); Assert(offset - pPatch->pPatchBlockOffset < pPatch->cbPatchBlockSize);
1108#endif
1109 return pVM->patm.s.pPatchMemHC + offset;
1110 }
1111 /* Note! We're _not_ including the patch helpers here. */
1112
1113 offset = pGCPtr & PAGE_OFFSET_MASK;
1114 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1115 return pCacheRec->pPageLocStartHC + offset;
1116
1117 /* Release previous lock if any. */
1118 if (pCacheRec->Lock.pvMap)
1119 {
1120 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1121 pCacheRec->Lock.pvMap = NULL;
1122 }
1123
1124 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1125 if (rc != VINF_SUCCESS)
1126 {
1127 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1128 return NULL;
1129 }
1130 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1131 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1132 return pHCPtr;
1133}
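/* Editor's sketch (not part of the original file): the conversion above keeps a
 * one-entry cache (pCacheRec) of the last guest page it mapped, so repeated lookups
 * within the same page skip the PGM mapping call. Stripped down to plain types: */
typedef struct PATMSKETCHPAGECACHE
{
    uint32_t  uPageGC;    /* guest page base covered by this entry (0 = empty) */
    uint8_t  *pbPageHC;   /* host mapping of that page */
} PATMSKETCHPAGECACHE;

static uint8_t *patmSketchCachedLookup(PATMSKETCHPAGECACHE *pCache, uint32_t uAddrGC)
{
    const uint32_t cbPage  = 4096;                 /* stand-in for PAGE_SIZE */
    uint32_t       uPageGC = uAddrGC & ~(cbPage - 1);
    if (pCache->pbPageHC && pCache->uPageGC == uPageGC)
        return pCache->pbPageHC + (uAddrGC & (cbPage - 1));   /* hit: rebase within the page */
    return NULL;   /* miss: map the page, release the old lock, then refresh the cache */
}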
1134
1135
1136/**
1137 * Calculates and fills in all branch targets
1138 *
1139 * @returns VBox status code.
1140 * @param pVM Pointer to the VM.
1141 * @param pPatch Current patch block pointer
1142 *
1143 */
1144static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1145{
1146 int32_t displ;
1147
1148 PJUMPREC pRec = 0;
1149 unsigned nrJumpRecs = 0;
1150
1151 /*
1152 * Set all branch targets inside the patch block.
1153 * We remove all jump records as they are no longer needed afterwards.
1154 */
1155 while (true)
1156 {
1157 RCPTRTYPE(uint8_t *) pInstrGC;
1158 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1159
1160 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1161 if (pRec == 0)
1162 break;
1163
1164 nrJumpRecs++;
1165
1166 /* HC in patch block to GC in patch block. */
1167 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1168
1169 if (pRec->opcode == OP_CALL)
1170 {
1171 /* Special case: call function replacement patch from this patch block.
1172 */
1173 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1174 if (!pFunctionRec)
1175 {
1176 int rc;
1177
1178 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1179 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1180 else
1181 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1182
1183 if (RT_FAILURE(rc))
1184 {
1185 uint8_t *pPatchHC;
1186 RTRCPTR pPatchGC;
1187 RTRCPTR pOrgInstrGC;
1188
1189 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1190 Assert(pOrgInstrGC);
1191
1192 /* Failure for some reason -> mark exit point with int 3. */
1193 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1194
1195 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1196 Assert(pPatchGC);
1197
1198 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1199
1200 /* Set a breakpoint at the very beginning of the recompiled instruction */
1201 *pPatchHC = 0xCC;
1202
1203 continue;
1204 }
1205 }
1206 else
1207 {
1208 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1209 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1210 }
1211
1212 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1213 }
1214 else
1215 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1216
1217 if (pBranchTargetGC == 0)
1218 {
1219 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1220 return VERR_PATCHING_REFUSED;
1221 }
1222 /* Our jumps *always* have a dword displacement (to make things easier). */
1223 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1224 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1225 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1226 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1227 }
1228 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1229 Assert(pPatch->JumpTree == 0);
1230 return VINF_SUCCESS;
1231}
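/* Editor's sketch (not part of the original file): the displacement written above is
 * the standard x86 rel32 encoding, measured from the end of the 4-byte displacement
 * field (i.e. from the address of the next instruction): */
static inline int32_t patmSketchCalcRel32(uint32_t uTargetGC, uint32_t uInstrGC, uint32_t offDispl)
{
    return (int32_t)(uTargetGC - (uInstrGC + offDispl + sizeof(int32_t)));
}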
1232
1233/**
1234 * Add an illegal instruction record
1235 *
1236 * @param pVM Pointer to the VM.
1237 * @param pPatch Patch structure ptr
1238 * @param pInstrGC Guest context pointer to privileged instruction
1239 *
1240 */
1241static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1242{
1243 PAVLPVNODECORE pRec;
1244
1245 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1246 Assert(pRec);
1247 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1248
1249 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1250 Assert(ret); NOREF(ret);
1251 pPatch->pTempInfo->nrIllegalInstr++;
1252}
1253
1254static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1255{
1256 PAVLPVNODECORE pRec;
1257
1258 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1259 if (pRec)
1260 return true;
1261 else
1262 return false;
1263}
1264
1265/**
1266 * Add a patch to guest lookup record
1267 *
1268 * @param pVM Pointer to the VM.
1269 * @param pPatch Patch structure ptr
1270 * @param pPatchInstrHC Guest context pointer to patch block
1271 * @param pInstrGC Guest context pointer to privileged instruction
1272 * @param enmType Lookup type
1273 * @param fDirty Dirty flag
1274 *
1275 * @note Be extremely careful with this function. Make absolutely sure the guest
1276 * address is correct! (to avoid executing instructions twice!)
1277 */
1278void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1279{
1280 bool ret;
1281 PRECPATCHTOGUEST pPatchToGuestRec;
1282 PRECGUESTTOPATCH pGuestToPatchRec;
1283 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1284
1285 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1286 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1287
1288 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1289 {
1290 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1291 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1292 return; /* already there */
1293
1294 Assert(!pPatchToGuestRec);
1295 }
1296#ifdef VBOX_STRICT
1297 else
1298 {
1299 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1300 Assert(!pPatchToGuestRec);
1301 }
1302#endif
1303
1304 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1305 Assert(pPatchToGuestRec);
1306 pPatchToGuestRec->Core.Key = PatchOffset;
1307 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1308 pPatchToGuestRec->enmType = enmType;
1309 pPatchToGuestRec->fDirty = fDirty;
1310
1311 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1312 Assert(ret);
1313
1314 /* GC to patch address */
1315 if (enmType == PATM_LOOKUP_BOTHDIR)
1316 {
1317 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1318 if (!pGuestToPatchRec)
1319 {
1320 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1321 pGuestToPatchRec->Core.Key = pInstrGC;
1322 pGuestToPatchRec->PatchOffset = PatchOffset;
1323
1324 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1325 Assert(ret);
1326 }
1327 }
1328
1329 pPatch->nrPatch2GuestRecs++;
1330}
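/* Editor's sketch (not part of the original file): the function above allocates both
 * lookup records in a single heap block and reaches the second one with pointer
 * arithmetic ("pPatchToGuestRec + 1"). The same pattern with hypothetical structs: */
typedef struct PATMSKETCHRECA { uint32_t uKey; } PATMSKETCHRECA;
typedef struct PATMSKETCHRECB { uint32_t uKey; } PATMSKETCHRECB;

static void patmSketchPairedRecords(uint8_t *pbBlock /* sizeof(A) + sizeof(B) bytes, zeroed */)
{
    PATMSKETCHRECA *pA = (PATMSKETCHRECA *)pbBlock;
    PATMSKETCHRECB *pB = (PATMSKETCHRECB *)(pA + 1);   /* "+ 1" steps over one whole A record */
    pA->uKey = 1;                                      /* patch-to-guest half */
    pB->uKey = 2;                                      /* guest-to-patch half */
    /* Freeing pA (one MMR3HeapFree call in the original) releases both records. */
}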
1331
1332
1333/**
1334 * Removes a patch to guest lookup record
1335 *
1336 * @param pVM Pointer to the VM.
1337 * @param pPatch Patch structure ptr
1338 * @param pPatchInstrGC Guest context pointer to patch block
1339 */
1340void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1341{
1342 PAVLU32NODECORE pNode;
1343 PAVLU32NODECORE pNode2;
1344 PRECPATCHTOGUEST pPatchToGuestRec;
1345 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1346
1347 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1348 Assert(pPatchToGuestRec);
1349 if (pPatchToGuestRec)
1350 {
1351 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1352 {
1353 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1354
1355 Assert(pGuestToPatchRec->Core.Key);
1356 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1357 Assert(pNode2);
1358 }
1359 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1360 Assert(pNode);
1361
1362 MMR3HeapFree(pPatchToGuestRec);
1363 pPatch->nrPatch2GuestRecs--;
1364 }
1365}
1366
1367
1368/**
1369 * RTAvlPVDestroy callback.
1370 */
1371static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1372{
1373 MMR3HeapFree(pNode);
1374 return 0;
1375}
1376
1377/**
1378 * Empty the specified tree (PV tree, MMR3 heap)
1379 *
1380 * @param pVM Pointer to the VM.
1381 * @param ppTree Tree to empty
1382 */
1383static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1384{
1385 NOREF(pVM);
1386 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1387}
1388
1389
1390/**
1391 * RTAvlU32Destroy callback.
1392 */
1393static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1394{
1395 MMR3HeapFree(pNode);
1396 return 0;
1397}
1398
1399/**
1400 * Empty the specified tree (U32 tree, MMR3 heap)
1401 *
1402 * @param pVM Pointer to the VM.
1403 * @param ppTree Tree to empty
1404 */
1405static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1406{
1407 NOREF(pVM);
1408 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1409}
1410
1411
1412/**
1413 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1414 *
1415 * @returns VBox status code.
1416 * @param pVM Pointer to the VM.
1417 * @param pCpu CPU disassembly state
1418 * @param pInstrGC Guest context pointer to privileged instruction
1419 * @param pCurInstrGC Guest context pointer to the current instruction
1420 * @param pCacheRec Cache record ptr
1421 *
1422 */
1423static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1424{
1425 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1426 bool fIllegalInstr = false;
1427
1428 /*
1429 * Preliminary heuristics:
1430 * - no call instructions without a fixed displacement between cli and sti/popf
1431 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1432 * - no nested pushf/cli
1433 * - sti/popf should be the (eventual) target of all branches
1434 * - no near or far returns; no int xx, no into
1435 *
1436 * Note: Later on we can impose less strict guidelines if the need arises
1437 */
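 /* For reference: the replacement we eventually write over the guest code is a plain
  * 5 byte near jump (SIZEOF_NEARJUMP32),
  *      E9 xx xx xx xx      ; jmp rel32 -> patch code
  * (see patmGenJumpToPatch below), which is why the heuristics above require at least
  * 5 undisturbed bytes following the cli/pushf.
  */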
1438
1439 /* Bail out if the patch gets too big. */
1440 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1441 {
1442 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1443 fIllegalInstr = true;
1444 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1445 }
1446 else
1447 {
1448 /* No unconditional jumps or calls without fixed displacements. */
1449 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1450 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1451 )
1452 {
1453 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1454 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1455 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1456 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1457 )
1458 {
1459 fIllegalInstr = true;
1460 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1461 }
1462 }
1463
1464 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1465 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1466 {
1467 if ( pCurInstrGC > pPatch->pPrivInstrGC
1468 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1469 {
1470 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1471 /* We turn this one into an int 3 callable patch. */
1472 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
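                /* (An int 3 replacement only overwrites the first opcode byte (see
                   patmActivateInt3Patch), so the code following the cli is left intact.) */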
1473 }
1474 }
1475 else
1476 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1477 if (pPatch->opcode == OP_PUSHF)
1478 {
1479 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1480 {
1481 fIllegalInstr = true;
1482 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1483 }
1484 }
1485
1486 /* no far returns */
1487 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1488 {
1489 pPatch->pTempInfo->nrRetInstr++;
1490 fIllegalInstr = true;
1491 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1492 }
1493 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1494 || pCpu->pCurInstr->uOpcode == OP_INT
1495 || pCpu->pCurInstr->uOpcode == OP_INTO)
1496 {
1497 /* No int xx or into either. */
1498 fIllegalInstr = true;
1499 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1500 }
1501 }
1502
1503 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1504
1505 /* Illegal instruction -> end of analysis phase for this code block */
1506 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1507 return VINF_SUCCESS;
1508
1509 /* Check for exit points. */
1510 switch (pCpu->pCurInstr->uOpcode)
1511 {
1512 case OP_SYSEXIT:
1513 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1514
1515 case OP_SYSENTER:
1516 case OP_ILLUD2:
1517 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1518 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1519 return VINF_SUCCESS;
1520
1521 case OP_STI:
1522 case OP_POPF:
1523 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1524 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1525 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1526 {
1527 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1528 return VERR_PATCHING_REFUSED;
1529 }
1530 if (pPatch->opcode == OP_PUSHF)
1531 {
1532 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1533 {
1534 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1535 return VINF_SUCCESS;
1536
1537 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1538 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1539 pPatch->flags |= PATMFL_CHECK_SIZE;
1540 }
1541 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1542 }
1543 /* else: fall through. */
1544 case OP_RETN: /* exit point for function replacement */
1545 return VINF_SUCCESS;
1546
1547 case OP_IRET:
1548 return VINF_SUCCESS; /* exitpoint */
1549
1550 case OP_CPUID:
1551 case OP_CALL:
1552 case OP_JMP:
1553 break;
1554
1555#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1556 case OP_STR:
1557 break;
1558#endif
1559
1560 default:
1561 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1562 {
1563 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1564 return VINF_SUCCESS; /* exit point */
1565 }
1566 break;
1567 }
1568
1569 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1570 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1571 {
1572 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1573 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1574 return VINF_SUCCESS;
1575 }
1576
1577 return VWRN_CONTINUE_ANALYSIS;
1578}
1579
1580/**
1581 * Analyses the instructions inside a function for compliance
1582 *
1583 * @returns VBox status code.
1584 * @param pVM Pointer to the VM.
1585 * @param pCpu CPU disassembly state
1586 * @param pInstrGC Guest context pointer to privileged instruction
1587 * @param pCurInstrGC Guest context pointer to the current instruction
1588 * @param pCacheRec Cache record ptr
1589 *
1590 */
1591static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1592{
1593 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1594 bool fIllegalInstr = false;
1595 NOREF(pInstrGC);
1596
1597 // Preliminary heuristics:
1598 // - no call instructions
1599 // - ret ends a block
1600
1601 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1602
1603 // bail out if the patch gets too big
1604 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1605 {
1606 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1607 fIllegalInstr = true;
1608 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1609 }
1610 else
1611 {
1612 // no unconditional jumps or calls without fixed displacements
1613 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1614 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1615 )
1616 {
1617 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1618 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1619 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1620 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1621 )
1622 {
1623 fIllegalInstr = true;
1624 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1625 }
1626 }
1627 else /* no far returns */
1628 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1629 {
1630 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1631 fIllegalInstr = true;
1632 }
1633 else /* no int xx or into either */
1634 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1635 {
1636 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1637 fIllegalInstr = true;
1638 }
1639
1640 #if 0
1641 ///@todo we can handle certain in/out and privileged instructions in the guest context
1642 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1643 {
1644 Log(("Illegal instructions for function patch!!\n"));
1645 return VERR_PATCHING_REFUSED;
1646 }
1647 #endif
1648 }
1649
1650 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1651
1652 /* Illegal instruction -> end of analysis phase for this code block */
1653 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1654 {
1655 return VINF_SUCCESS;
1656 }
1657
1658 // Check for exit points
1659 switch (pCpu->pCurInstr->uOpcode)
1660 {
1661 case OP_ILLUD2:
1662 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1663 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1664 return VINF_SUCCESS;
1665
1666 case OP_IRET:
1667 case OP_SYSEXIT: /* will fault or be emulated in GC */
1668 case OP_RETN:
1669 return VINF_SUCCESS;
1670
1671#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1672 case OP_STR:
1673 break;
1674#endif
1675
1676 case OP_POPF:
1677 case OP_STI:
1678 return VWRN_CONTINUE_ANALYSIS;
1679 default:
1680 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1681 {
1682 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1683 return VINF_SUCCESS; /* exit point */
1684 }
1685 return VWRN_CONTINUE_ANALYSIS;
1686 }
1687
1688 return VWRN_CONTINUE_ANALYSIS;
1689}
1690
1691/**
1692 * Recompiles the instructions in a code block
1693 *
1694 * @returns VBox status code.
1695 * @param pVM Pointer to the VM.
1696 * @param pCpu CPU disassembly state
1697 * @param pInstrGC Guest context pointer to privileged instruction
1698 * @param pCurInstrGC Guest context pointer to the current instruction
1699 * @param pCacheRec Cache record ptr
1700 *
1701 */
1702static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1703{
1704 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1705 int rc = VINF_SUCCESS;
1706 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1707
1708 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1709
1710 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1711 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1712 {
1713 /*
1714 * Been there, done that; so insert a jump (we don't want to duplicate code)
1715 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1716 */
1717 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1718 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1719 }
1720
1721 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1722 {
1723 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1724 }
1725 else
1726 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1727
1728 if (RT_FAILURE(rc))
1729 return rc;
1730
1731 /* Note: Never do a direct return unless a failure is encountered! */
1732
1733 /* Clear recompilation of next instruction flag; we are doing that right here. */
1734 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1735 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1736
1737 /* Add lookup record for patch to guest address translation */
1738 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1739
1740 /* Update lowest and highest instruction address for this patch */
1741 if (pCurInstrGC < pPatch->pInstrGCLowest)
1742 pPatch->pInstrGCLowest = pCurInstrGC;
1743 else
1744 if (pCurInstrGC > pPatch->pInstrGCHighest)
1745 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1746
1747 /* Illegal instruction -> end of recompile phase for this code block. */
1748 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1749 {
1750 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1751 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1752 goto end;
1753 }
1754
1755 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1756 * Indirect calls are handled below.
1757 */
1758 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1759 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1760 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1761 {
1762 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1763 if (pTargetGC == 0)
1764 {
1765 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1766 return VERR_PATCHING_REFUSED;
1767 }
1768
1769 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1770 {
1771 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1772 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1773 if (RT_FAILURE(rc))
1774 goto end;
1775 }
1776 else
1777 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1778
1779 if (RT_SUCCESS(rc))
1780 rc = VWRN_CONTINUE_RECOMPILE;
1781
1782 goto end;
1783 }
1784
1785 switch (pCpu->pCurInstr->uOpcode)
1786 {
1787 case OP_CLI:
1788 {
1789 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1790 * until we've found the proper exit point(s).
1791 */
1792 if ( pCurInstrGC != pInstrGC
1793 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1794 )
1795 {
1796 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1797 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1798 }
1799 /* Set by irq inhibition; no longer valid now. */
1800 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1801
1802 rc = patmPatchGenCli(pVM, pPatch);
1803 if (RT_SUCCESS(rc))
1804 rc = VWRN_CONTINUE_RECOMPILE;
1805 break;
1806 }
1807
1808 case OP_MOV:
1809 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1810 {
1811 /* mov ss, src? */
1812 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1813 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1814 {
1815 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1816 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1817 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1818 }
1819#if 0 /* necessary for Haiku */
1820 else
1821 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1822 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1823 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1824 {
1825 /* mov GPR, ss */
1826 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1827 if (RT_SUCCESS(rc))
1828 rc = VWRN_CONTINUE_RECOMPILE;
1829 break;
1830 }
1831#endif
1832 }
1833 goto duplicate_instr;
1834
1835 case OP_POP:
1836 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1837 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1838 {
1839 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1840
1841 Log(("Force recompilation of next instruction for OP_POP at %RRv\n", pCurInstrGC));
1842 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1843 }
1844 goto duplicate_instr;
1845
1846 case OP_STI:
1847 {
1848 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1849
1850 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
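            /* (x86 only delivers interrupts after the instruction following sti has completed,
               which is why the next instruction is disassembled and handled together with the
               sti below.) */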
1851 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1852 {
1853 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1854 fInhibitIRQInstr = true;
1855 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1856 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1857 }
1858 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1859
1860 if (RT_SUCCESS(rc))
1861 {
1862 DISCPUSTATE cpu = *pCpu;
1863 unsigned cbInstr;
1864 int disret;
1865 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1866
1867 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1868
1869 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1870 { /* Force pNextInstrHC out of scope after using it */
1871 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1872 if (pNextInstrHC == NULL)
1873 {
1874 AssertFailed();
1875 return VERR_PATCHING_REFUSED;
1876 }
1877
1878 // Disassemble the next instruction
1879 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1880 }
1881 if (disret == false)
1882 {
1883 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1884 return VERR_PATCHING_REFUSED;
1885 }
1886 pReturnInstrGC = pNextInstrGC + cbInstr;
1887
1888 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1889 || pReturnInstrGC <= pInstrGC
1890 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1891 )
1892 {
1893 /* Not an exit point for function duplication patches */
1894 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1895 && RT_SUCCESS(rc))
1896 {
1897 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1898 rc = VWRN_CONTINUE_RECOMPILE;
1899 }
1900 else
1901 rc = VINF_SUCCESS; //exit point
1902 }
1903 else {
1904 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1905 rc = VERR_PATCHING_REFUSED; //not allowed!!
1906 }
1907 }
1908 break;
1909 }
1910
1911 case OP_POPF:
1912 {
1913 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
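            /* fGenerateJmpBack: only jump straight back to guest code if the popf ends at or
               beyond the 5 bytes overwritten by the patch jump; otherwise the return target
               would land inside the overwritten region. */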
1914
1915 /* Not an exit point for IDT handler or function replacement patches */
1916 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1917 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1918 fGenerateJmpBack = false;
1919
1920 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1921 if (RT_SUCCESS(rc))
1922 {
1923 if (fGenerateJmpBack == false)
1924 {
1925 /* Not an exit point for IDT handler or function replacement patches */
1926 rc = VWRN_CONTINUE_RECOMPILE;
1927 }
1928 else
1929 {
1930 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1931 rc = VINF_SUCCESS; /* exit point! */
1932 }
1933 }
1934 break;
1935 }
1936
1937 case OP_PUSHF:
1938 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1939 if (RT_SUCCESS(rc))
1940 rc = VWRN_CONTINUE_RECOMPILE;
1941 break;
1942
1943 case OP_PUSH:
1944 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1945 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1946 {
1947 rc = patmPatchGenPushCS(pVM, pPatch);
1948 if (RT_SUCCESS(rc))
1949 rc = VWRN_CONTINUE_RECOMPILE;
1950 break;
1951 }
1952 goto duplicate_instr;
1953
1954 case OP_IRET:
1955 Log(("IRET at %RRv\n", pCurInstrGC));
1956 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1957 if (RT_SUCCESS(rc))
1958 {
1959 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1960 rc = VINF_SUCCESS; /* exit point by definition */
1961 }
1962 break;
1963
1964 case OP_ILLUD2:
1965 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing */
1966 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1967 if (RT_SUCCESS(rc))
1968 rc = VINF_SUCCESS; /* exit point by definition */
1969 Log(("Illegal opcode (0xf 0xb)\n"));
1970 break;
1971
1972 case OP_CPUID:
1973 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1974 if (RT_SUCCESS(rc))
1975 rc = VWRN_CONTINUE_RECOMPILE;
1976 break;
1977
1978 case OP_STR:
1979#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
1980 /* Now safe because our shadow TR entry is identical to the guest's. */
1981 goto duplicate_instr;
1982#endif
1983 case OP_SLDT:
1984 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1985 if (RT_SUCCESS(rc))
1986 rc = VWRN_CONTINUE_RECOMPILE;
1987 break;
1988
1989 case OP_SGDT:
1990 case OP_SIDT:
1991 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1992 if (RT_SUCCESS(rc))
1993 rc = VWRN_CONTINUE_RECOMPILE;
1994 break;
1995
1996 case OP_RETN:
1997 /* retn is an exit point for function patches */
1998 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1999 if (RT_SUCCESS(rc))
2000 rc = VINF_SUCCESS; /* exit point by definition */
2001 break;
2002
2003 case OP_SYSEXIT:
2004 /* Duplicate it, so it can be emulated in GC (or fault). */
2005 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2006 if (RT_SUCCESS(rc))
2007 rc = VINF_SUCCESS; /* exit point by definition */
2008 break;
2009
2010 case OP_CALL:
2011 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2012 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2013 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2014 */
2015 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2016 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
2017 {
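                /* The 0xDEADBEEF target below is just a placeholder: for indirect calls the
                   real target is only known at run time (the trailing 'true' argument
                   presumably marks the call as indirect for patmPatchGenCall). */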
2018 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2019 if (RT_SUCCESS(rc))
2020 {
2021 rc = VWRN_CONTINUE_RECOMPILE;
2022 }
2023 break;
2024 }
2025 goto gen_illegal_instr;
2026
2027 case OP_JMP:
2028 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2029 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2030 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2031 */
2032 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2033 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2034 {
2035 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2036 if (RT_SUCCESS(rc))
2037 rc = VINF_SUCCESS; /* end of branch */
2038 break;
2039 }
2040 goto gen_illegal_instr;
2041
2042 case OP_INT3:
2043 case OP_INT:
2044 case OP_INTO:
2045 goto gen_illegal_instr;
2046
2047 case OP_MOV_DR:
2048 /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
2049 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2050 {
2051 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2052 if (RT_SUCCESS(rc))
2053 rc = VWRN_CONTINUE_RECOMPILE;
2054 break;
2055 }
2056 goto duplicate_instr;
2057
2058 case OP_MOV_CR:
2059 /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
2060 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2061 {
2062 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2063 if (RT_SUCCESS(rc))
2064 rc = VWRN_CONTINUE_RECOMPILE;
2065 break;
2066 }
2067 goto duplicate_instr;
2068
2069 default:
2070 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2071 {
2072gen_illegal_instr:
2073 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2074 if (RT_SUCCESS(rc))
2075 rc = VINF_SUCCESS; /* exit point by definition */
2076 }
2077 else
2078 {
2079duplicate_instr:
2080 Log(("patmPatchGenDuplicate\n"));
2081 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2082 if (RT_SUCCESS(rc))
2083 rc = VWRN_CONTINUE_RECOMPILE;
2084 }
2085 break;
2086 }
2087
2088end:
2089
2090 if ( !fInhibitIRQInstr
2091 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2092 {
2093 int rc2;
2094 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2095
2096 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2097 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2098 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2099 {
2100 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2101
2102 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2103 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2104 rc = VINF_SUCCESS; /* end of the line */
2105 }
2106 else
2107 {
2108 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2109 }
2110 if (RT_FAILURE(rc2))
2111 rc = rc2;
2112 }
2113
2114 if (RT_SUCCESS(rc))
2115 {
2116 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2117 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2118 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2119 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2120 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2121 )
2122 {
2123 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2124
2125 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2126 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2127
2128 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2129 AssertRC(rc);
2130 }
2131 }
2132 return rc;
2133}
2134
2135
2136#ifdef LOG_ENABLED
2137
2138/**
2139 * Add a disasm jump record (temporary record to prevent duplicate analysis)
2140 *
2141 * @param pVM Pointer to the VM.
2142 * @param pPatch Patch structure ptr
2143 * @param pInstrGC Guest context pointer to privileged instruction
2144 *
2145 */
2146static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2147{
2148 PAVLPVNODECORE pRec;
2149
2150 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2151 Assert(pRec);
2152 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2153
2154 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2155 Assert(ret);
2156}
2157
2158/**
2159 * Checks if jump target has been analysed before.
2160 *
2161 * @returns true if the jump target has been analysed before, false otherwise.
2162 * @param pPatch Patch struct
2163 * @param pInstrGC Jump target
2164 *
2165 */
2166static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2167{
2168 PAVLPVNODECORE pRec;
2169
2170 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2171 if (pRec)
2172 return true;
2173 return false;
2174}
2175
2176/**
2177 * For proper disassembly of the final patch block
2178 *
2179 * @returns VBox status code.
2180 * @param pVM Pointer to the VM.
2181 * @param pCpu CPU disassembly state
2182 * @param pInstrGC Guest context pointer to privileged instruction
2183 * @param pCurInstrGC Guest context pointer to the current instruction
2184 * @param pCacheRec Cache record ptr
2185 *
2186 */
2187int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2188{
2189 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2190 NOREF(pInstrGC);
2191
2192 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2193 {
2194 /* Could be an int3 inserted in a call patch. Check to be sure */
2195 DISCPUSTATE cpu;
2196 RTRCPTR pOrgJumpGC;
2197
2198 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2199
2200 { /* Force pOrgJumpHC out of scope after using it */
2201 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2202
2203 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2204 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2205 return VINF_SUCCESS;
2206 }
2207 return VWRN_CONTINUE_ANALYSIS;
2208 }
2209
2210 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2211 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2212 {
2213 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2214 return VWRN_CONTINUE_ANALYSIS;
2215 }
2216
2217 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2218 || pCpu->pCurInstr->uOpcode == OP_INT
2219 || pCpu->pCurInstr->uOpcode == OP_IRET
2220 || pCpu->pCurInstr->uOpcode == OP_RETN
2221 || pCpu->pCurInstr->uOpcode == OP_RETF
2222 )
2223 {
2224 return VINF_SUCCESS;
2225 }
2226
2227 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2228 return VINF_SUCCESS;
2229
2230 return VWRN_CONTINUE_ANALYSIS;
2231}
2232
2233
2234/**
2235 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2236 *
2237 * @returns VBox status code.
2238 * @param pVM Pointer to the VM.
2239 * @param pInstrGC Guest context pointer to the initial privileged instruction
2240 * @param pCurInstrGC Guest context pointer to the current instruction
2241 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2242 * @param pCacheRec Cache record ptr
2243 *
2244 */
2245int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2246{
2247 DISCPUSTATE cpu;
2248 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2249 int rc = VWRN_CONTINUE_ANALYSIS;
2250 uint32_t cbInstr, delta;
2251 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2252 bool disret;
2253 char szOutput[256];
2254
2255 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2256
2257 /* We need this to determine branch targets (and for disassembling). */
2258 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2259
2260 while (rc == VWRN_CONTINUE_ANALYSIS)
2261 {
2262 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2263 if (pCurInstrHC == NULL)
2264 {
2265 rc = VERR_PATCHING_REFUSED;
2266 goto end;
2267 }
2268
2269 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2270 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2271 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2272 {
2273 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2274
2275 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2276 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2277 else
2278 Log(("DIS %s", szOutput));
2279
2280 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2281 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2282 {
2283 rc = VINF_SUCCESS;
2284 goto end;
2285 }
2286 }
2287 else
2288 Log(("DIS: %s", szOutput));
2289
2290 if (disret == false)
2291 {
2292 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2293 rc = VINF_SUCCESS;
2294 goto end;
2295 }
2296
2297 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2298 if (rc != VWRN_CONTINUE_ANALYSIS) {
2299 break; //done!
2300 }
2301
2302 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2303 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2304 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2305 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2306 )
2307 {
2308 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2309 RTRCPTR pOrgTargetGC;
2310
2311 if (pTargetGC == 0)
2312 {
2313 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2314 rc = VERR_PATCHING_REFUSED;
2315 break;
2316 }
2317
2318 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2319 {
2320 //jump back to guest code
2321 rc = VINF_SUCCESS;
2322 goto end;
2323 }
2324 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2325
2326 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2327 {
2328 rc = VINF_SUCCESS;
2329 goto end;
2330 }
2331
2332 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2333 {
2334 /* New jump, let's check it. */
2335 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2336
2337 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2338 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2339 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2340
2341 if (rc != VINF_SUCCESS) {
2342 break; //done!
2343 }
2344 }
2345 if (cpu.pCurInstr->uOpcode == OP_JMP)
2346 {
2347 /* Unconditional jump; return to caller. */
2348 rc = VINF_SUCCESS;
2349 goto end;
2350 }
2351
2352 rc = VWRN_CONTINUE_ANALYSIS;
2353 }
2354 pCurInstrGC += cbInstr;
2355 }
2356end:
2357 return rc;
2358}
2359
2360/**
2361 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2362 *
2363 * @returns VBox status code.
2364 * @param pVM Pointer to the VM.
2365 * @param pInstrGC Guest context pointer to the initial privileged instruction
2366 * @param pCurInstrGC Guest context pointer to the current instruction
2367 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2368 * @param pCacheRec Cache record ptr
2369 *
2370 */
2371int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2372{
2373 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2374
2375 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2376 /* Free all disasm jump records. */
2377 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2378 return rc;
2379}
2380
2381#endif /* LOG_ENABLED */
2382
2383/**
2384 * Detects if the specified address falls within a 5 byte jump generated for an active patch.
2385 * If so, this patch is permanently disabled.
2386 *
2387 * @param pVM Pointer to the VM.
2388 * @param pInstrGC Guest context pointer to instruction
2389 * @param pConflictGC Guest context pointer to check
2390 *
2391 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2392 *
2393 */
2394VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2395{
2396 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2397 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2398 if (pTargetPatch)
2399 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2400 return VERR_PATCH_NO_CONFLICT;
2401}
2402
2403/**
2404 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2405 *
2406 * @returns VBox status code.
2407 * @param pVM Pointer to the VM.
2408 * @param pInstrGC Guest context pointer to privileged instruction
2409 * @param pCurInstrGC Guest context pointer to the current instruction
2410 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2411 * @param pCacheRec Cache record ptr
2412 *
2413 */
2414static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2415{
2416 DISCPUSTATE cpu;
2417 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2418 int rc = VWRN_CONTINUE_ANALYSIS;
2419 uint32_t cbInstr;
2420 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2421 bool disret;
2422#ifdef LOG_ENABLED
2423 char szOutput[256];
2424#endif
2425
2426 while (rc == VWRN_CONTINUE_RECOMPILE)
2427 {
2428 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2429 if (pCurInstrHC == NULL)
2430 {
2431 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2432 goto end;
2433 }
2434#ifdef LOG_ENABLED
2435 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2436 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2437 Log(("Recompile: %s", szOutput));
2438#else
2439 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2440#endif
2441 if (disret == false)
2442 {
2443 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2444
2445 /* Add lookup record for patch to guest address translation */
2446 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2447 patmPatchGenIllegalInstr(pVM, pPatch);
2448 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2449 goto end;
2450 }
2451
2452 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2453 if (rc != VWRN_CONTINUE_RECOMPILE)
2454 {
2455 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2456 if ( rc == VINF_SUCCESS
2457 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2458 {
2459 DISCPUSTATE cpunext;
2460 uint32_t opsizenext;
2461 uint8_t *pNextInstrHC;
2462 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2463
2464 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2465
2466 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2467 * Recompile the next instruction as well
2468 */
2469 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2470 if (pNextInstrHC == NULL)
2471 {
2472 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2473 goto end;
2474 }
2475 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2476 if (disret == false)
2477 {
2478 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2479 goto end;
2480 }
2481 switch(cpunext.pCurInstr->uOpcode)
2482 {
2483 case OP_IRET: /* inhibit cleared in generated code */
2484 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2485 case OP_HLT:
2486 break; /* recompile these */
2487
2488 default:
2489 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2490 {
2491 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2492
2493 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2494 AssertRC(rc);
2495 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2496 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2497 }
2498 break;
2499 }
2500
2501 /* Note: after a cli we must continue to a proper exit point */
2502 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2503 {
2504 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2505 if (RT_SUCCESS(rc))
2506 {
2507 rc = VINF_SUCCESS;
2508 goto end;
2509 }
2510 break;
2511 }
2512 else
2513 rc = VWRN_CONTINUE_RECOMPILE;
2514 }
2515 else
2516 break; /* done! */
2517 }
2518
2519 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2520
2521
2522 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2523 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2524 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2525 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2526 )
2527 {
2528 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2529 if (addr == 0)
2530 {
2531 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2532 rc = VERR_PATCHING_REFUSED;
2533 break;
2534 }
2535
2536 Log(("Jump encountered target %RRv\n", addr));
2537
2538 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2539 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2540 {
2541 Log(("patmRecompileCodeStream continue past conditional jump\n"));
2542 /* First we need to finish this linear code stream until the next exit point. */
2543 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2544 if (RT_FAILURE(rc))
2545 {
2546 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2547 break; //fatal error
2548 }
2549 }
2550
2551 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2552 {
2553 /* New code; let's recompile it. */
2554 Log(("patmRecompileCodeStream continue with jump\n"));
2555
2556 /*
2557 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2558 * this patch so we can continue our analysis
2559 *
2560 * We rely on CSAM to detect and resolve conflicts
2561 */
2562 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2563 if(pTargetPatch)
2564 {
2565 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2566 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2567 }
2568
2569 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2570 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2571 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2572
2573 if(pTargetPatch)
2574 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2575
2576 if (RT_FAILURE(rc))
2577 {
2578 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2579 break; //done!
2580 }
2581 }
2582 /* Always return to caller here; we're done! */
2583 rc = VINF_SUCCESS;
2584 goto end;
2585 }
2586 else
2587 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2588 {
2589 rc = VINF_SUCCESS;
2590 goto end;
2591 }
2592 pCurInstrGC += cbInstr;
2593 }
2594end:
2595 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2596 return rc;
2597}
2598
2599
2600/**
2601 * Generate the jump from guest to patch code
2602 *
2603 * @returns VBox status code.
2604 * @param pVM Pointer to the VM.
2605 * @param pPatch Patch record
2606 * @param pCacheRec Guest translation lookup cache record
2607 */
2608static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2609{
2610 uint8_t temp[8];
2611 uint8_t *pPB;
2612 int rc;
2613
2614 Assert(pPatch->cbPatchJump <= sizeof(temp));
2615 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2616
2617 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2618 Assert(pPB);
2619
2620#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2621 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2622 {
2623 Assert(pPatch->pPatchJumpDestGC);
2624
2625 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2626 {
2627 // jmp [PatchCode]
2628 if (fAddFixup)
2629 {
2630 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2631 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2632 {
2633 Log(("Relocation failed for the jump in the guest code!!\n"));
2634 return VERR_PATCHING_REFUSED;
2635 }
2636 }
2637
2638 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2639 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2640 }
2641 else
2642 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2643 {
2644 // jmp [PatchCode]
2645 if (fAddFixup)
2646 {
2647 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2648 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2649 {
2650 Log(("Relocation failed for the jump in the guest code!!\n"));
2651 return VERR_PATCHING_REFUSED;
2652 }
2653 }
2654
2655 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2656 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2657 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2658 }
2659 else
2660 {
2661 Assert(0);
2662 return VERR_PATCHING_REFUSED;
2663 }
2664 }
2665 else
2666#endif
2667 {
2668 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2669
2670 // jmp [PatchCode]
2671 if (fAddFixup)
2672 {
2673 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32,
2674 PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2675 {
2676 Log(("Relocation failed for the jump in the guest code!!\n"));
2677 return VERR_PATCHING_REFUSED;
2678 }
2679 }
2680 temp[0] = 0xE9; //jmp
2681 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
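        /* i.e. a standard near jmp: opcode 0xE9 followed by a 32-bit displacement that is
           relative to the first byte after the 5 byte instruction
           (rel32 = PATCHCODE_PTR_GC(pPatch) - (pPrivInstrGC + SIZEOF_NEARJUMP32)). */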
2682 }
2683 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2684 AssertRC(rc);
2685
2686 if (rc == VINF_SUCCESS)
2687 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2688
2689 return rc;
2690}
2691
2692/**
2693 * Remove the jump from guest to patch code
2694 *
2695 * @returns VBox status code.
2696 * @param pVM Pointer to the VM.
2697 * @param pPatch Patch record
2698 */
2699static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2700{
2701#ifdef DEBUG
2702 DISCPUSTATE cpu;
2703 char szOutput[256];
2704 uint32_t cbInstr, i = 0;
2705 bool disret;
2706
2707 while (i < pPatch->cbPrivInstr)
2708 {
2709 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2710 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2711 if (disret == false)
2712 break;
2713
2714 Log(("Org patch jump: %s", szOutput));
2715 Assert(cbInstr);
2716 i += cbInstr;
2717 }
2718#endif
2719
2720 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2721 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2722#ifdef DEBUG
2723 if (rc == VINF_SUCCESS)
2724 {
2725 i = 0;
2726 while (i < pPatch->cbPrivInstr)
2727 {
2728 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2729 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2730 if (disret == false)
2731 break;
2732
2733 Log(("Org instr: %s", szOutput));
2734 Assert(cbInstr);
2735 i += cbInstr;
2736 }
2737 }
2738#endif
2739 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2740 return rc;
2741}
2742
2743/**
2744 * Generate the call from guest to patch code
2745 *
2746 * @returns VBox status code.
2747 * @param pVM Pointer to the VM.
2748 * @param pPatch Patch record
2749 * @param pTargetGC Guest context address of the call/jump target (patch code)
2750 * @param pCacheRec Guest translation cache record
2751 */
2752static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2753{
2754 uint8_t temp[8];
2755 uint8_t *pPB;
2756 int rc;
2757
2758 Assert(pPatch->cbPatchJump <= sizeof(temp));
2759
2760 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2761 Assert(pPB);
2762
2763 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2764
2765 // jmp [PatchCode]
2766 if (fAddFixup)
2767 {
2768 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH,
2769 pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2770 {
2771 Log(("Relocation failed for the jump in the guest code!!\n"));
2772 return VERR_PATCHING_REFUSED;
2773 }
2774 }
2775
2776 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2777 temp[0] = pPatch->aPrivInstr[0];
2778 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
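    /* The original opcode byte (0xE8 call / 0xE9 jmp, see the assertion above) is kept and
       only the rel32 operand is retargeted at pTargetGC, so the instruction type the guest
       sees does not change. */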
2779
2780 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2781 AssertRC(rc);
2782
2783 return rc;
2784}
2785
2786
2787/**
2788 * Patch cli/sti pushf/popf instruction block at specified location
2789 *
2790 * @returns VBox status code.
2791 * @param pVM Pointer to the VM.
2792 * @param pInstrGC Guest context pointer to privileged instruction
2793 * @param pInstrHC Host context pointer to privileged instruction
2794 * @param uOpcode Instruction opcode
2795 * @param uOpSize Size of starting instruction
2796 * @param pPatchRec Patch record
2797 *
2798 * @note returns failure if patching is not allowed or not possible
2799 *
2800 */
2801static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2802 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2803{
2804 PPATCHINFO pPatch = &pPatchRec->patch;
2805 int rc = VERR_PATCHING_REFUSED;
2806 uint32_t orgOffsetPatchMem = ~0;
2807 RTRCPTR pInstrStart;
2808 bool fInserted;
2809 NOREF(pInstrHC); NOREF(uOpSize);
2810
2811 /* Save original offset (in case of failures later on) */
2812 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2813 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2814
2815 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2816 switch (uOpcode)
2817 {
2818 case OP_MOV:
2819 break;
2820
2821 case OP_CLI:
2822 case OP_PUSHF:
2823 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2824 /* Note: special precautions are taken when disabling and enabling such patches. */
2825 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2826 break;
2827
2828 default:
2829 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2830 {
2831 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2832 return VERR_INVALID_PARAMETER;
2833 }
2834 }
2835
2836 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2837 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2838
2839 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2840 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2841 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2842 )
2843 {
2844 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2845 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2846 rc = VERR_PATCHING_REFUSED;
2847 goto failure;
2848 }
2849
2850 pPatch->nrPatch2GuestRecs = 0;
2851 pInstrStart = pInstrGC;
2852
2853#ifdef PATM_ENABLE_CALL
2854 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2855#endif
2856
2857 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2858 pPatch->uCurPatchOffset = 0;
2859
2860 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2861 {
2862 Assert(pPatch->flags & PATMFL_INTHANDLER);
2863
2864 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2865 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2866 if (RT_FAILURE(rc))
2867 goto failure;
2868 }
2869
2870 /***************************************************************************************************************************/
2871 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2872 /***************************************************************************************************************************/
2873#ifdef VBOX_WITH_STATISTICS
2874 if (!(pPatch->flags & PATMFL_SYSENTER))
2875 {
2876 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2877 if (RT_FAILURE(rc))
2878 goto failure;
2879 }
2880#endif
2881
2882 PATMP2GLOOKUPREC cacheRec;
2883 RT_ZERO(cacheRec);
2884 cacheRec.pPatch = pPatch;
2885
2886 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2887 /* Free leftover lock if any. */
2888 if (cacheRec.Lock.pvMap)
2889 {
2890 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2891 cacheRec.Lock.pvMap = NULL;
2892 }
2893 if (rc != VINF_SUCCESS)
2894 {
2895 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2896 goto failure;
2897 }
2898
2899 /* Calculated during analysis. */
2900 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2901 {
2902 /* Most likely cause: we encountered an illegal instruction very early on. */
2903 /** @todo could turn it into an int3 callable patch. */
2904 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2905 rc = VERR_PATCHING_REFUSED;
2906 goto failure;
2907 }
2908
2909 /* size of patch block */
2910 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2911
2912
2913 /* Update free pointer in patch memory. */
2914 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2915 /* Round to next 8 byte boundary. */
2916 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2917
2918 /*
2919 * Insert into patch to guest lookup tree
2920 */
2921 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2922 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2923 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2924 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2925 if (!fInserted)
2926 {
2927 rc = VERR_PATCHING_REFUSED;
2928 goto failure;
2929 }
2930
2931 /* Note that patmr3SetBranchTargets can install additional patches!! */
2932 rc = patmr3SetBranchTargets(pVM, pPatch);
2933 if (rc != VINF_SUCCESS)
2934 {
2935 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2936 goto failure;
2937 }
2938
2939#ifdef LOG_ENABLED
2940 Log(("Patch code ----------------------------------------------------------\n"));
2941 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2942 /* Free leftover lock if any. */
2943 if (cacheRec.Lock.pvMap)
2944 {
2945 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2946 cacheRec.Lock.pvMap = NULL;
2947 }
2948 Log(("Patch code ends -----------------------------------------------------\n"));
2949#endif
2950
2951 /* make a copy of the guest code bytes that will be overwritten */
2952 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2953
2954 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2955 AssertRC(rc);
2956
2957 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2958 {
2959 /*uint8_t bASMInt3 = 0xCC; - unused */
2960
2961 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2962 /* Replace first opcode byte with 'int 3'. */
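        /* (Only a single 0xCC byte is written, so unlike the 5 byte patch jump this cannot
           clobber the instructions following the privileged one; PATMFL_MUST_INSTALL_PATCHJMP
           is therefore cleared below.) */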
2963 rc = patmActivateInt3Patch(pVM, pPatch);
2964 if (RT_FAILURE(rc))
2965 goto failure;
2966
2967 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2968 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2969
2970 pPatch->flags &= ~PATMFL_INSTR_HINT;
2971 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2972 }
2973 else
2974 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2975 {
2976 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2977 /* now insert a jump in the guest code */
2978 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2979 AssertRC(rc);
2980 if (RT_FAILURE(rc))
2981 goto failure;
2982
2983 }
2984
2985 patmR3DbgAddPatch(pVM, pPatchRec);
2986
2987 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
2988
2989 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2990 pPatch->pTempInfo->nrIllegalInstr = 0;
2991
2992 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2993
2994 pPatch->uState = PATCH_ENABLED;
2995 return VINF_SUCCESS;
2996
2997failure:
2998 if (pPatchRec->CoreOffset.Key)
2999 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3000
3001 patmEmptyTree(pVM, &pPatch->FixupTree);
3002 pPatch->nrFixups = 0;
3003
3004 patmEmptyTree(pVM, &pPatch->JumpTree);
3005 pPatch->nrJumpRecs = 0;
3006
3007 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3008 pPatch->pTempInfo->nrIllegalInstr = 0;
3009
3010 /* Turn this cli patch into a dummy. */
3011 pPatch->uState = PATCH_REFUSED;
3012 pPatch->pPatchBlockOffset = 0;
3013
3014 // Give back the patch memory we no longer need
3015 Assert(orgOffsetPatchMem != (uint32_t)~0);
3016 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3017
3018 return rc;
3019}
3020
3021/**
3022 * Patch IDT handler
3023 *
3024 * @returns VBox status code.
3025 * @param pVM Pointer to the VM.
3026 * @param pInstrGC Guest context pointer to privileged instruction
3027 * @param uOpSize Size of starting instruction
3028 * @param pPatchRec Patch record
3029 * @param pCacheRec Cache record ptr
3030 *
3031 * @note returns failure if patching is not allowed or not possible
3032 *
3033 */
3034static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3035{
3036 PPATCHINFO pPatch = &pPatchRec->patch;
3037 bool disret;
3038 DISCPUSTATE cpuPush, cpuJmp;
3039 uint32_t cbInstr;
3040 RTRCPTR pCurInstrGC = pInstrGC;
3041 uint8_t *pCurInstrHC, *pInstrHC;
3042 uint32_t orgOffsetPatchMem = ~0;
3043
3044 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3045 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3046
3047 /*
3048 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3049 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3050 * condition here and only patch the common entrypoint once.
3051 */
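    /*
     * Illustrative (hypothetical) guest stub matching the pattern checked for below:
     *     push 0x20              ; handler-specific value pushed onto the stack
     *     jmp  common_entrypoint ; shared code, patched only once and reused by all stubs
     */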
3052 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3053 Assert(disret);
3054 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3055 {
3056 RTRCPTR pJmpInstrGC;
3057 int rc;
3058 pCurInstrGC += cbInstr;
3059
3060 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3061 if ( disret
3062 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3063 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3064 )
3065 {
3066 bool fInserted;
3067 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3068 if (pJmpPatch == 0)
3069 {
3070 /* Patch it first! */
3071 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3072 if (rc != VINF_SUCCESS)
3073 goto failure;
3074 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3075 Assert(pJmpPatch);
3076 }
3077 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3078 goto failure;
3079
3080 /* save original offset (in case of failures later on) */
3081 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3082
3083 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3084 pPatch->uCurPatchOffset = 0;
3085 pPatch->nrPatch2GuestRecs = 0;
3086
3087#ifdef VBOX_WITH_STATISTICS
3088 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3089 if (RT_FAILURE(rc))
3090 goto failure;
3091#endif
3092
3093 /* Install fake cli patch (to clear the virtual IF) */
3094 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3095 if (RT_FAILURE(rc))
3096 goto failure;
3097
3098 /* Add lookup record for patch to guest address translation (for the push) */
3099 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3100
3101 /* Duplicate push. */
3102 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3103 if (RT_FAILURE(rc))
3104 goto failure;
3105
3106 /* Generate jump to common entrypoint. */
3107 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3108 if (RT_FAILURE(rc))
3109 goto failure;
3110
3111 /* size of patch block */
3112 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3113
3114 /* Update free pointer in patch memory. */
3115 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3116 /* Round to next 8 byte boundary */
3117 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
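    /* Example (hypothetical values): a free offset of 0x1233 is rounded up to 0x1238 here,
       so the next patch block always starts on an 8-byte boundary. */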
3118
3119 /* There's no jump from guest to patch code. */
3120 pPatch->cbPatchJump = 0;
3121
3122
3123#ifdef LOG_ENABLED
3124 Log(("Patch code ----------------------------------------------------------\n"));
3125 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3126 Log(("Patch code ends -----------------------------------------------------\n"));
3127#endif
3128 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3129
3130 /*
3131 * Insert into patch to guest lookup tree
3132 */
3133 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3134 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3135 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3136 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3137 patmR3DbgAddPatch(pVM, pPatchRec);
3138
3139 pPatch->uState = PATCH_ENABLED;
3140
3141 return VINF_SUCCESS;
3142 }
3143 }
3144failure:
3145 /* Give back the patch memory we no longer need */
3146 if (orgOffsetPatchMem != (uint32_t)~0)
3147 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3148
3149 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3150}
3151
3152/**
3153 * Install a trampoline to call a guest trap handler directly
3154 *
3155 * @returns VBox status code.
3156 * @param pVM Pointer to the VM.
3157 * @param pInstrGC Guest context pointer to the privileged instruction
3158 * @param pPatchRec Patch record
3159 * @param pCacheRec Cache record ptr
3160 *
3161 */
3162static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3163{
3164 PPATCHINFO pPatch = &pPatchRec->patch;
3165 int rc = VERR_PATCHING_REFUSED;
3166 uint32_t orgOffsetPatchMem = ~0;
3167 bool fInserted;
3168
3169 // save original offset (in case of failures later on)
3170 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3171
3172 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3173 pPatch->uCurPatchOffset = 0;
3174 pPatch->nrPatch2GuestRecs = 0;
3175
3176#ifdef VBOX_WITH_STATISTICS
3177 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3178 if (RT_FAILURE(rc))
3179 goto failure;
3180#endif
3181
3182 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3183 if (RT_FAILURE(rc))
3184 goto failure;
3185
3186 /* size of patch block */
3187 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3188
3189 /* Update free pointer in patch memory. */
3190 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3191 /* Round to next 8 byte boundary */
3192 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3193
3194 /* There's no jump from guest to patch code. */
3195 pPatch->cbPatchJump = 0;
3196
3197#ifdef LOG_ENABLED
3198 Log(("Patch code ----------------------------------------------------------\n"));
3199 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3200 Log(("Patch code ends -----------------------------------------------------\n"));
3201#endif
3202 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3203 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3204
3205 /*
3206 * Insert into patch to guest lookup tree
3207 */
3208 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3209 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3210 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3211 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3212 patmR3DbgAddPatch(pVM, pPatchRec);
3213
3214 pPatch->uState = PATCH_ENABLED;
3215 return VINF_SUCCESS;
3216
3217failure:
3218 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3219
3220 /* Turn this cli patch into a dummy. */
3221 pPatch->uState = PATCH_REFUSED;
3222 pPatch->pPatchBlockOffset = 0;
3223
3224 /* Give back the patch memory we no longer need */
3225 Assert(orgOffsetPatchMem != (uint32_t)~0);
3226 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3227
3228 return rc;
3229}
3230
3231
3232#ifdef LOG_ENABLED
3233/**
3234 * Check if the instruction is patched as a common idt handler
3235 *
3236 * @returns true or false
3237 * @param pVM Pointer to the VM.
3238 * @param pInstrGC Guest context pointer to the instruction
3239 *
3240 */
3241static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3242{
3243 PPATMPATCHREC pRec;
3244
3245 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3246 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3247 return true;
3248 return false;
3249}
3250 #endif //LOG_ENABLED
3251
3252
3253/**
3254 * Duplicates a complete function
3255 *
3256 * @returns VBox status code.
3257 * @param pVM Pointer to the VM.
3258 * @param pInstrGC Guest context pointer to the privileged instruction
3259 * @param pPatchRec Patch record
3260 * @param pCacheRec Cache record ptr
3261 *
3262 */
3263static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3264{
3265 PPATCHINFO pPatch = &pPatchRec->patch;
3266 int rc = VERR_PATCHING_REFUSED;
3267 uint32_t orgOffsetPatchMem = ~0;
3268 bool fInserted;
3269
3270 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3271 /* Save original offset (in case of failures later on). */
3272 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3273
3274 /* We will not go on indefinitely with call instruction handling. */
3275 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3276 {
3277 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3278 return VERR_PATCHING_REFUSED;
3279 }
3280
3281 pVM->patm.s.ulCallDepth++;
3282
3283#ifdef PATM_ENABLE_CALL
3284 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3285#endif
3286
3287 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3288
3289 pPatch->nrPatch2GuestRecs = 0;
3290 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3291 pPatch->uCurPatchOffset = 0;
3292
3293 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3294 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3295 if (RT_FAILURE(rc))
3296 goto failure;
3297
3298#ifdef VBOX_WITH_STATISTICS
3299 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3300 if (RT_FAILURE(rc))
3301 goto failure;
3302#endif
3303
3304 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3305 if (rc != VINF_SUCCESS)
3306 {
3307 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3308 goto failure;
3309 }
3310
3311 //size of patch block
3312 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3313
3314 //update free pointer in patch memory
3315 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3316 /* Round to next 8 byte boundary. */
3317 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3318
3319 pPatch->uState = PATCH_ENABLED;
3320
3321 /*
3322 * Insert into patch to guest lookup tree
3323 */
3324 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3325 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3326 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3327 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3328 if (!fInserted)
3329 {
3330 rc = VERR_PATCHING_REFUSED;
3331 goto failure;
3332 }
3333
3334 /* Note that patmr3SetBranchTargets can install additional patches!! */
3335 rc = patmr3SetBranchTargets(pVM, pPatch);
3336 if (rc != VINF_SUCCESS)
3337 {
3338 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3339 goto failure;
3340 }
3341
3342 patmR3DbgAddPatch(pVM, pPatchRec);
3343
3344#ifdef LOG_ENABLED
3345 Log(("Patch code ----------------------------------------------------------\n"));
3346 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3347 Log(("Patch code ends -----------------------------------------------------\n"));
3348#endif
3349
3350 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3351
3352 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3353 pPatch->pTempInfo->nrIllegalInstr = 0;
3354
3355 pVM->patm.s.ulCallDepth--;
3356 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3357 return VINF_SUCCESS;
3358
3359failure:
3360 if (pPatchRec->CoreOffset.Key)
3361 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3362
3363 patmEmptyTree(pVM, &pPatch->FixupTree);
3364 pPatch->nrFixups = 0;
3365
3366 patmEmptyTree(pVM, &pPatch->JumpTree);
3367 pPatch->nrJumpRecs = 0;
3368
3369 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3370 pPatch->pTempInfo->nrIllegalInstr = 0;
3371
3372 /* Turn this cli patch into a dummy. */
3373 pPatch->uState = PATCH_REFUSED;
3374 pPatch->pPatchBlockOffset = 0;
3375
3376 // Give back the patch memory we no longer need
3377 Assert(orgOffsetPatchMem != (uint32_t)~0);
3378 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3379
3380 pVM->patm.s.ulCallDepth--;
3381 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3382 return rc;
3383}
3384
3385/**
3386 * Creates trampoline code to jump inside an existing patch
3387 *
3388 * @returns VBox status code.
3389 * @param pVM Pointer to the VM.
3390 * @param pInstrGC Guest context pointer to the privileged instruction
3391 * @param pPatchRec Patch record
3392 *
3393 */
3394static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3395{
3396 PPATCHINFO pPatch = &pPatchRec->patch;
3397 RTRCPTR pPage, pPatchTargetGC = 0;
3398 uint32_t orgOffsetPatchMem = ~0;
3399 int rc = VERR_PATCHING_REFUSED;
3400 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3401 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3402 bool fInserted = false;
3403
3404 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3405 /* Save original offset (in case of failures later on). */
3406 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3407
3408 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3409 /** @todo we already checked this before */
3410 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3411
3412 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3413 if (pPatchPage)
3414 {
3415 uint32_t i;
3416
3417 for (i=0;i<pPatchPage->cCount;i++)
3418 {
3419 if (pPatchPage->papPatch[i])
3420 {
3421 pPatchToJmp = pPatchPage->papPatch[i];
3422
3423 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3424 && pPatchToJmp->uState == PATCH_ENABLED)
3425 {
3426 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3427 if (pPatchTargetGC)
3428 {
3429 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3430 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3431 Assert(pPatchToGuestRec);
3432
3433 pPatchToGuestRec->fJumpTarget = true;
3434 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3435 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3436 break;
3437 }
3438 }
3439 }
3440 }
3441 }
3442 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3443
3444 /*
3445 * Only record the trampoline patch if this is the first patch to the target
3446 * or we recorded other patches already.
3447 * The goal is to refuse refreshing function duplicates if the guest
3448 * modifies code after a saved state was loaded because it is not possible
3449 * to save the relation between trampoline and target without changing the
3450 * saved state version.
3451 */
3452 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3453 || pPatchToJmp->pTrampolinePatchesHead)
3454 {
3455 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3456 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3457 if (!pTrampRec)
3458 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3459
3460 pTrampRec->pPatchTrampoline = pPatchRec;
3461 }
3462
3463 pPatch->nrPatch2GuestRecs = 0;
3464 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3465 pPatch->uCurPatchOffset = 0;
3466
3467 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3468 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3469 if (RT_FAILURE(rc))
3470 goto failure;
3471
3472#ifdef VBOX_WITH_STATISTICS
3473 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3474 if (RT_FAILURE(rc))
3475 goto failure;
3476#endif
3477
3478 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3479 if (RT_FAILURE(rc))
3480 goto failure;
3481
3482 /*
3483 * Insert into patch to guest lookup tree
3484 */
3485 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3486 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3487 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3488 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3489 if (!fInserted)
3490 {
3491 rc = VERR_PATCHING_REFUSED;
3492 goto failure;
3493 }
3494 patmR3DbgAddPatch(pVM, pPatchRec);
3495
3496 /* size of patch block */
3497 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3498
3499 /* Update free pointer in patch memory. */
3500 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3501 /* Round to next 8 byte boundary */
3502 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3503
3504 /* There's no jump from guest to patch code. */
3505 pPatch->cbPatchJump = 0;
3506
3507 /* Enable the patch. */
3508 pPatch->uState = PATCH_ENABLED;
3509 /* We allow this patch to be called as a function. */
3510 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3511
3512 if (pTrampRec)
3513 {
3514 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3515 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3516 }
3517 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3518 return VINF_SUCCESS;
3519
3520failure:
3521 if (pPatchRec->CoreOffset.Key)
3522 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3523
3524 patmEmptyTree(pVM, &pPatch->FixupTree);
3525 pPatch->nrFixups = 0;
3526
3527 patmEmptyTree(pVM, &pPatch->JumpTree);
3528 pPatch->nrJumpRecs = 0;
3529
3530 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3531 pPatch->pTempInfo->nrIllegalInstr = 0;
3532
3533 /* Turn this cli patch into a dummy. */
3534 pPatch->uState = PATCH_REFUSED;
3535 pPatch->pPatchBlockOffset = 0;
3536
3537 // Give back the patch memory we no longer need
3538 Assert(orgOffsetPatchMem != (uint32_t)~0);
3539 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3540
3541 if (pTrampRec)
3542 MMR3HeapFree(pTrampRec);
3543
3544 return rc;
3545}
3546
3547
3548/**
3549 * Patch branch target function for call/jump at specified location.
3550 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3551 *
3552 * @returns VBox status code.
3553 * @param pVM Pointer to the VM.
3554 * @param pCtx Pointer to the guest CPU context.
3555 *
3556 */
3557VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3558{
3559 RTRCPTR pBranchTarget, pPage;
3560 int rc;
3561 RTRCPTR pPatchTargetGC = 0;
3562 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
3563
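    /*
     * Register interface used by the patch code for this request (summary of the code below):
     *   edx (in)  - branch target address (made flat via SELMToFlat)
     *   edi (in)  - address of the lookup cache slot inside patch memory
     *   eax (out) - target address relative to patch memory, or 0 if no patch could be created
     *   eip       - advanced past the special (illegal) instruction that raised this request
     */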
3564 pBranchTarget = pCtx->edx;
3565 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3566
3567 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3568 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3569
3570 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3571 if (pPatchPage)
3572 {
3573 uint32_t i;
3574
3575 for (i=0;i<pPatchPage->cCount;i++)
3576 {
3577 if (pPatchPage->papPatch[i])
3578 {
3579 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3580
3581 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3582 && pPatch->uState == PATCH_ENABLED)
3583 {
3584 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3585 if (pPatchTargetGC)
3586 {
3587 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3588 break;
3589 }
3590 }
3591 }
3592 }
3593 }
3594
3595 if (pPatchTargetGC)
3596 {
3597 /* Create a trampoline that also sets PATM_ASMFIX_INTERRUPTFLAG. */
3598 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3599 }
3600 else
3601 {
3602 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3603 }
3604
3605 if (rc == VINF_SUCCESS)
3606 {
3607 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3608 Assert(pPatchTargetGC);
3609 }
3610
3611 if (pPatchTargetGC)
3612 {
3613 pCtx->eax = pPatchTargetGC;
3614 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3615 }
3616 else
3617 {
3618 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3619 pCtx->eax = 0;
3620 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3621 }
3622 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3623 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3624 AssertRC(rc);
3625
3626 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3627 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3628 return VINF_SUCCESS;
3629}
3630
3631/**
3632 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3633 *
3634 * @returns VBox status code.
3635 * @param pVM Pointer to the VM.
3636 * @param pCpu Disassembly CPU structure ptr
3637 * @param pInstrGC Guest context pointer to the privileged instruction
3638 * @param pCacheRec Cache record ptr
3639 *
3640 */
3641static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3642{
3643 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3644 int rc = VERR_PATCHING_REFUSED;
3645 DISCPUSTATE cpu;
3646 RTRCPTR pTargetGC;
3647 PPATMPATCHREC pPatchFunction;
3648 uint32_t cbInstr;
3649 bool disret;
3650
3651 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3652 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3653
3654 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3655 {
3656 rc = VERR_PATCHING_REFUSED;
3657 goto failure;
3658 }
3659
3660 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3661 if (pTargetGC == 0)
3662 {
3663 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3664 rc = VERR_PATCHING_REFUSED;
3665 goto failure;
3666 }
3667
3668 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3669 if (pPatchFunction == NULL)
3670 {
3671 for(;;)
3672 {
3673 /* It could be an indirect call (call -> jmp dest).
3674 * Note that it's dangerous to assume the jump will never change...
3675 */
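            /*
             * Hypothetical example of the thunk pattern resolved here:
             *     call  thunk          ; original call site
             *   thunk:
             *     jmp   real_function  ; followed once below to find the duplicated function
             */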
3676 uint8_t *pTmpInstrHC;
3677
3678 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3679 Assert(pTmpInstrHC);
3680 if (pTmpInstrHC == 0)
3681 break;
3682
3683 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3684 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3685 break;
3686
3687 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3688 if (pTargetGC == 0)
3689 {
3690 break;
3691 }
3692
3693 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3694 break;
3695 }
3696 if (pPatchFunction == 0)
3697 {
3698 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3699 rc = VERR_PATCHING_REFUSED;
3700 goto failure;
3701 }
3702 }
3703
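    /* Hypothetical illustration: a near "call 0xC0105000" whose target has been duplicated into
       patch memory is rewritten below into a call to that duplicate, so execution stays inside the
       recompiled (safe) copy of the function. */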
3704 // make a copy of the guest code bytes that will be overwritten
3705 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3706
3707 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3708 AssertRC(rc);
3709
3710 /* Now replace the original call in the guest code */
3711 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3712 AssertRC(rc);
3713 if (RT_FAILURE(rc))
3714 goto failure;
3715
3716 /* Lowest and highest address for write monitoring. */
3717 pPatch->pInstrGCLowest = pInstrGC;
3718 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3719 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3720
3721 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3722
3723 pPatch->uState = PATCH_ENABLED;
3724 return VINF_SUCCESS;
3725
3726failure:
3727 /* Turn this patch into a dummy. */
3728 pPatch->uState = PATCH_REFUSED;
3729
3730 return rc;
3731}
3732
3733/**
3734 * Replace the address in an MMIO instruction with the cached version.
3735 *
3736 * @returns VBox status code.
3737 * @param pVM Pointer to the VM.
3738 * @param pInstrGC Guest context pointer to the privileged instruction
3739 * @param pCpu Disassembly CPU structure ptr
3740 * @param pCacheRec Cache record ptr
3741 *
3742 * @note returns failure if patching is not allowed or not possible
3743 *
3744 */
3745static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3746{
3747 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3748 uint8_t *pPB;
3749 int rc = VERR_PATCHING_REFUSED;
3750
3751 Assert(pVM->patm.s.mmio.pCachedData);
3752 if (!pVM->patm.s.mmio.pCachedData)
3753 goto failure;
3754
3755 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3756 goto failure;
3757
3758 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3759 if (pPB == 0)
3760 goto failure;
3761
3762 /* Add relocation record for cached data access. */
3763 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC,
3764 pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3765 {
3766 Log(("Relocation failed for cached mmio address!!\n"));
3767 return VERR_PATCHING_REFUSED;
3768 }
3769 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3770
3771 /* Save original instruction. */
3772 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3773 AssertRC(rc);
3774
3775 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3776
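    /* Illustration (hypothetical guest instruction): "mov ecx, [0FEE000B0h]" faulting on MMIO has
       its 32-bit displacement (the last 4 bytes of the instruction) rewritten below to point at
       pVM->patm.s.mmio.pCachedData, so the access reads the cached value instead. */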
3777 /* Replace address with that of the cached item. */
3778 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
3779 &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3780 AssertRC(rc);
3781 if (RT_FAILURE(rc))
3782 {
3783 goto failure;
3784 }
3785
3786 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3787 pVM->patm.s.mmio.pCachedData = 0;
3788 pVM->patm.s.mmio.GCPhys = 0;
3789 pPatch->uState = PATCH_ENABLED;
3790 return VINF_SUCCESS;
3791
3792failure:
3793 /* Turn this patch into a dummy. */
3794 pPatch->uState = PATCH_REFUSED;
3795
3796 return rc;
3797}
3798
3799
3800/**
3801 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3802 *
3803 * @returns VBox status code.
3804 * @param pVM Pointer to the VM.
3805 * @param pInstrGC Guest context pointer to the privileged instruction
3806 * @param pPatch Patch record
3807 *
3808 * @note returns failure if patching is not allowed or not possible
3809 *
3810 */
3811static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3812{
3813 DISCPUSTATE cpu;
3814 uint32_t cbInstr;
3815 bool disret;
3816 uint8_t *pInstrHC;
3817
3818 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3819
3820 /* Convert GC to HC address. */
3821 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3822 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3823
3824 /* Disassemble mmio instruction. */
3825 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3826 &cpu, &cbInstr);
3827 if (disret == false)
3828 {
3829 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3830 return VERR_PATCHING_REFUSED;
3831 }
3832
3833 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3834 if (cbInstr > MAX_INSTR_SIZE)
3835 return VERR_PATCHING_REFUSED;
3836 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3837 return VERR_PATCHING_REFUSED;
3838
3839 /* Add relocation record for cached data access. */
3840 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3841 {
3842 Log(("Relocation failed for cached mmio address!!\n"));
3843 return VERR_PATCHING_REFUSED;
3844 }
3845 /* Replace address with that of the cached item. */
3846 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3847
3848 /* Lowest and highest address for write monitoring. */
3849 pPatch->pInstrGCLowest = pInstrGC;
3850 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3851
3852 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3853 pVM->patm.s.mmio.pCachedData = 0;
3854 pVM->patm.s.mmio.GCPhys = 0;
3855 return VINF_SUCCESS;
3856}
3857
3858/**
3859 * Activates an int3 patch
3860 *
3861 * @returns VBox status code.
3862 * @param pVM Pointer to the VM.
3863 * @param pPatch Patch record
3864 */
3865static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3866{
3867 uint8_t bASMInt3 = 0xCC;
3868 int rc;
3869
3870 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3871 Assert(pPatch->uState != PATCH_ENABLED);
3872
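    /* Hypothetical example: for a patched "cli" (0xFA) the byte at pPrivInstrGC becomes 0xCC below;
       the original byte was saved in pPatch->aPrivInstr and is restored by patmDeactivateInt3Patch. */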
3873 /* Replace first opcode byte with 'int 3'. */
3874 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3875 AssertRC(rc);
3876
3877 pPatch->cbPatchJump = sizeof(bASMInt3);
3878
3879 return rc;
3880}
3881
3882/**
3883 * Deactivates an int3 patch
3884 *
3885 * @returns VBox status code.
3886 * @param pVM Pointer to the VM.
3887 * @param pPatch Patch record
3888 */
3889static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3890{
3891 uint8_t ASMInt3 = 0xCC;
3892 int rc;
3893
3894 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3895 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3896
3897 /* Restore first opcode byte. */
3898 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3899 AssertRC(rc);
3900 return rc;
3901}
3902
3903/**
3904 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3905 * in the raw-mode context.
3906 *
3907 * @returns VBox status code.
3908 * @param pVM Pointer to the VM.
3909 * @param pInstrGC Guest context pointer to the privileged instruction
3910 * @param pInstrHC Host context pointer to the privileged instruction
3911 * @param pCpu Disassembly CPU structure ptr
3912 * @param pPatch Patch record
3913 *
3914 * @note returns failure if patching is not allowed or not possible
3915 *
3916 */
3917int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3918{
3919 uint8_t bASMInt3 = 0xCC;
3920 int rc;
3921
3922 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3923 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3924
3925 /* Save the original instruction. */
3926 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3927 AssertRC(rc);
3928 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3929
3930 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3931
3932 /* Replace first opcode byte with 'int 3'. */
3933 rc = patmActivateInt3Patch(pVM, pPatch);
3934 if (RT_FAILURE(rc))
3935 goto failure;
3936
3937 /* Lowest and highest address for write monitoring. */
3938 pPatch->pInstrGCLowest = pInstrGC;
3939 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3940
3941 pPatch->uState = PATCH_ENABLED;
3942 return VINF_SUCCESS;
3943
3944failure:
3945 /* Turn this patch into a dummy. */
3946 return VERR_PATCHING_REFUSED;
3947}
3948
3949#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3950/**
3951 * Patch a jump instruction at specified location
3952 *
3953 * @returns VBox status code.
3954 * @param pVM Pointer to the VM.
3955 * @param pInstrGC Guest context pointer to the privileged instruction
3956 * @param pInstrHC Host context pointer to the privileged instruction
3957 * @param pCpu Disassembly CPU structure ptr
3958 * @param pPatchRec Patch record
3959 *
3960 * @note returns failure if patching is not allowed or not possible
3961 *
3962 */
3963int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3964{
3965 PPATCHINFO pPatch = &pPatchRec->patch;
3966 int rc = VERR_PATCHING_REFUSED;
3967
3968 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3969 pPatch->uCurPatchOffset = 0;
3970 pPatch->cbPatchBlockSize = 0;
3971 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3972
3973 /*
3974 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3975 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3976 */
3977 switch (pCpu->pCurInstr->uOpcode)
3978 {
3979 case OP_JO:
3980 case OP_JNO:
3981 case OP_JC:
3982 case OP_JNC:
3983 case OP_JE:
3984 case OP_JNE:
3985 case OP_JBE:
3986 case OP_JNBE:
3987 case OP_JS:
3988 case OP_JNS:
3989 case OP_JP:
3990 case OP_JNP:
3991 case OP_JL:
3992 case OP_JNL:
3993 case OP_JLE:
3994 case OP_JNLE:
3995 case OP_JMP:
3996 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3997 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
3998 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
3999 goto failure;
4000
4001 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4002 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4003 goto failure;
4004
4005 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4006 {
4007 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4008 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4009 rc = VERR_PATCHING_REFUSED;
4010 goto failure;
4011 }
4012
4013 break;
4014
4015 default:
4016 goto failure;
4017 }
4018
4019 // make a copy of the guest code bytes that will be overwritten
4020 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4021 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4022 pPatch->cbPatchJump = pCpu->cbInstr;
4023
4024 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4025 AssertRC(rc);
4026
4027 /* Now insert a jump in the guest code. */
4028 /*
4029 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4030 * references the target instruction in the conflict patch.
4031 */
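    /*
     * Hypothetical example: a guest "jne targetGC" whose target lies inside an existing patch gets
     * its rel32 operand rewritten so that it branches to the corresponding instruction inside
     * patch memory (pJmpDest) instead of to the conflicting guest code.
     */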
4032 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4033
4034 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4035 pPatch->pPatchJumpDestGC = pJmpDest;
4036
4037 PATMP2GLOOKUPREC cacheRec;
4038 RT_ZERO(cacheRec);
4039 cacheRec.pPatch = pPatch;
4040
4041 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4042 /* Free leftover lock if any. */
4043 if (cacheRec.Lock.pvMap)
4044 {
4045 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4046 cacheRec.Lock.pvMap = NULL;
4047 }
4048 AssertRC(rc);
4049 if (RT_FAILURE(rc))
4050 goto failure;
4051
4052 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4053
4054 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4055 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4056
4057 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4058
4059 /* Lowest and highest address for write monitoring. */
4060 pPatch->pInstrGCLowest = pInstrGC;
4061 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4062
4063 pPatch->uState = PATCH_ENABLED;
4064 return VINF_SUCCESS;
4065
4066failure:
4067 /* Turn this cli patch into a dummy. */
4068 pPatch->uState = PATCH_REFUSED;
4069
4070 return rc;
4071}
4072#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4073
4074
4075/**
4076 * Gives hint to PATM about supervisor guest instructions
4077 *
4078 * @returns VBox status code.
4079 * @param pVM Pointer to the VM.
4080 * @param pInstrGC Guest context pointer to the privileged instruction
4081 * @param flags Patch flags
4082 */
4083VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4084{
4085 Assert(pInstrGC);
4086 Assert(flags == PATMFL_CODE32);
4087
4088 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4089 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4090}
4091
4092/**
4093 * Patch privileged instruction at specified location
4094 *
4095 * @returns VBox status code.
4096 * @param pVM Pointer to the VM.
4097 * @param pInstrGC Guest context pointer to the privileged instruction (0:32 flat address)
4098 * @param flags Patch flags
4099 *
4100 * @note returns failure if patching is not allowed or not possible
4101 */
4102VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4103{
4104 DISCPUSTATE cpu;
4105 R3PTRTYPE(uint8_t *) pInstrHC;
4106 uint32_t cbInstr;
4107 PPATMPATCHREC pPatchRec;
4108 PCPUMCTX pCtx = 0;
4109 bool disret;
4110 int rc;
4111 PVMCPU pVCpu = VMMGetCpu0(pVM);
4112 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4113
4114 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4115
4116 if ( !pVM
4117 || pInstrGC == 0
4118 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4119 {
4120 AssertFailed();
4121 return VERR_INVALID_PARAMETER;
4122 }
4123
4124 if (PATMIsEnabled(pVM) == false)
4125 return VERR_PATCHING_REFUSED;
4126
4127 /* Test for patch conflict only with patches that actually change guest code. */
4128 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4129 {
4130 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4131 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4132 if (pConflictPatch != 0)
4133 return VERR_PATCHING_REFUSED;
4134 }
4135
4136 if (!(flags & PATMFL_CODE32))
4137 {
4138 /** @todo Only 32-bit code right now */
4139 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16-bit code at the moment!!\n"));
4140 return VERR_NOT_IMPLEMENTED;
4141 }
4142
4143 /* We ran out of patch memory; don't bother anymore. */
4144 if (pVM->patm.s.fOutOfMemory == true)
4145 return VERR_PATCHING_REFUSED;
4146
4147#if 1 /* DONT COMMIT ENABLED! */
4148 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on. */
4149 if ( 0
4150 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4151 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4152 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4153 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4154 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4155 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4156 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4157 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4158 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4159 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4160 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4161 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4162 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4163 || pInstrGC == 0x80014447 /* KfLowerIrql */
4164 || 0)
4165 {
4166 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4167 return VERR_PATCHING_REFUSED;
4168 }
4169#endif
4170
4171 /* Make sure the code selector is wide open; otherwise refuse. */
4172 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4173 if (CPUMGetGuestCPL(pVCpu) == 0)
4174 {
4175 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4176 if (pInstrGCFlat != pInstrGC)
4177 {
4178 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4179 return VERR_PATCHING_REFUSED;
4180 }
4181 }
4182
4183 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3). */
4184 if (!(flags & PATMFL_GUEST_SPECIFIC))
4185 {
4186 /* New code. Make sure CSAM has a go at it first. */
4187 CSAMR3CheckCode(pVM, pInstrGC);
4188 }
4189
4190 /* Note: obsolete */
4191 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4192 && (flags & PATMFL_MMIO_ACCESS))
4193 {
4194 RTRCUINTPTR offset;
4195 void *pvPatchCoreOffset;
4196
4197 /* Find the patch record. */
4198 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4199 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4200 if (pvPatchCoreOffset == NULL)
4201 {
4202 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4203 return VERR_PATCH_NOT_FOUND; //fatal error
4204 }
4205 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4206
4207 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4208 }
4209
4210 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4211
4212 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4213 if (pPatchRec)
4214 {
4215 Assert(!(flags & PATMFL_TRAMPOLINE));
4216
4217 /* Hints about existing patches are ignored. */
4218 if (flags & PATMFL_INSTR_HINT)
4219 return VERR_PATCHING_REFUSED;
4220
4221 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4222 {
4223 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4224 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4225 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4226 }
4227
4228 if (pPatchRec->patch.uState == PATCH_DISABLED)
4229 {
4230 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4231 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4232 {
4233 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4234 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4235 }
4236 else
4237 Log(("Enabling patch %RRv again\n", pInstrGC));
4238
4239 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4240 rc = PATMR3EnablePatch(pVM, pInstrGC);
4241 if (RT_SUCCESS(rc))
4242 return VWRN_PATCH_ENABLED;
4243
4244 return rc;
4245 }
4246 if ( pPatchRec->patch.uState == PATCH_ENABLED
4247 || pPatchRec->patch.uState == PATCH_DIRTY)
4248 {
4249 /*
4250 * The patch might have been overwritten.
4251 */
4252 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4253 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4254 {
4255 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4256 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4257 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4258 {
4259 if (flags & PATMFL_IDTHANDLER)
4260 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4261
4262 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4263 }
4264 }
4265 rc = PATMR3RemovePatch(pVM, pInstrGC);
4266 if (RT_FAILURE(rc))
4267 return VERR_PATCHING_REFUSED;
4268 }
4269 else
4270 {
4271 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4272 /* already tried it once! */
4273 return VERR_PATCHING_REFUSED;
4274 }
4275 }
4276
4277 RTGCPHYS GCPhys;
4278 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4279 if (rc != VINF_SUCCESS)
4280 {
4281 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4282 return rc;
4283 }
4284 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4285 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4286 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4287 {
4288 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4289 return VERR_PATCHING_REFUSED;
4290 }
4291
4292 /* Initialize cache record for guest address translations. */
4293 bool fInserted;
4294 PATMP2GLOOKUPREC cacheRec;
4295 RT_ZERO(cacheRec);
4296
4297 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4298 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4299
4300 /* Allocate patch record. */
4301 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4302 if (RT_FAILURE(rc))
4303 {
4304 Log(("Out of memory!!!!\n"));
4305 return VERR_NO_MEMORY;
4306 }
4307 pPatchRec->Core.Key = pInstrGC;
4308 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4309 /* Insert patch record into the lookup tree. */
4310 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4311 Assert(fInserted);
4312
4313 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4314 pPatchRec->patch.flags = flags;
4315 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4316 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4317
4318 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4319 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4320
4321 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4322 {
4323 /*
4324 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4325 */
4326 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4327 if (pPatchNear)
4328 {
4329 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4330 {
4331 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4332
4333 pPatchRec->patch.uState = PATCH_UNUSABLE;
4334 /*
4335 * Leave the new patch record in place, marked unusable, to prevent us from checking it over and over again
4336 */
4337 return VERR_PATCHING_REFUSED;
4338 }
4339 }
4340 }
4341
4342 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4343 if (pPatchRec->patch.pTempInfo == 0)
4344 {
4345 Log(("Out of memory!!!!\n"));
4346 return VERR_NO_MEMORY;
4347 }
4348
4349 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4350 if (disret == false)
4351 {
4352 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4353 return VERR_PATCHING_REFUSED;
4354 }
4355
4356 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4357 if (cbInstr > MAX_INSTR_SIZE)
4358 return VERR_PATCHING_REFUSED;
4359
4360 pPatchRec->patch.cbPrivInstr = cbInstr;
4361 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4362
4363 /* Restricted hinting for now. */
4364 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4365
4366 /* Initialize cache record patch pointer. */
4367 cacheRec.pPatch = &pPatchRec->patch;
4368
4369 /* Allocate statistics slot */
4370 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4371 {
4372 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4373 }
4374 else
4375 {
4376 Log(("WARNING: Patch index wrap around!!\n"));
4377 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4378 }
4379
4380 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4381 {
4382 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4383 }
4384 else
4385 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4386 {
4387 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4388 }
4389 else
4390 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4391 {
4392 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4393 }
4394 else
4395 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4396 {
4397 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4398 }
4399 else
4400 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4401 {
4402 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4403 }
4404 else
4405 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4406 {
4407 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4408 }
4409 else
4410 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4411 {
4412 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4413 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4414
4415 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4416#ifdef VBOX_WITH_STATISTICS
4417 if ( rc == VINF_SUCCESS
4418 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4419 {
4420 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4421 }
4422#endif
4423 }
4424 else
4425 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4426 {
4427 switch (cpu.pCurInstr->uOpcode)
4428 {
4429 case OP_SYSENTER:
4430 case OP_PUSH:
4431 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4432 if (rc == VINF_SUCCESS)
4433 {
4435 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4436 return rc;
4437 }
4438 break;
4439
4440 default:
4441 rc = VERR_NOT_IMPLEMENTED;
4442 break;
4443 }
4444 }
4445 else
4446 {
4447 switch (cpu.pCurInstr->uOpcode)
4448 {
4449 case OP_SYSENTER:
4450 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4451 if (rc == VINF_SUCCESS)
4452 {
4453 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4454 return VINF_SUCCESS;
4455 }
4456 break;
4457
4458#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4459 case OP_JO:
4460 case OP_JNO:
4461 case OP_JC:
4462 case OP_JNC:
4463 case OP_JE:
4464 case OP_JNE:
4465 case OP_JBE:
4466 case OP_JNBE:
4467 case OP_JS:
4468 case OP_JNS:
4469 case OP_JP:
4470 case OP_JNP:
4471 case OP_JL:
4472 case OP_JNL:
4473 case OP_JLE:
4474 case OP_JNLE:
4475 case OP_JECXZ:
4476 case OP_LOOP:
4477 case OP_LOOPNE:
4478 case OP_LOOPE:
4479 case OP_JMP:
4480 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4481 {
4482 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4483 break;
4484 }
4485 return VERR_NOT_IMPLEMENTED;
4486#endif
4487
4488 case OP_PUSHF:
4489 case OP_CLI:
4490 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4491 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4492 break;
4493
4494#ifndef VBOX_WITH_SAFE_STR
4495 case OP_STR:
4496#endif
4497 case OP_SGDT:
4498 case OP_SLDT:
4499 case OP_SIDT:
4500 case OP_CPUID:
4501 case OP_LSL:
4502 case OP_LAR:
4503 case OP_SMSW:
4504 case OP_VERW:
4505 case OP_VERR:
4506 case OP_IRET:
4507#ifdef VBOX_WITH_RAW_RING1
4508 case OP_MOV:
4509#endif
4510 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4511 break;
4512
4513 default:
4514 return VERR_NOT_IMPLEMENTED;
4515 }
4516 }
4517
4518 if (rc != VINF_SUCCESS)
4519 {
4520 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4521 {
4522 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4523 pPatchRec->patch.nrPatch2GuestRecs = 0;
4524 }
4525 pVM->patm.s.uCurrentPatchIdx--;
4526 }
4527 else
4528 {
4529 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4530 AssertRCReturn(rc, rc);
4531
4532 /* Keep track of the upper and lower boundaries of patched instructions */
4533 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4534 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4535 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4536 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4537
4538 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4539 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4540
4541 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4542 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4543
4544 rc = VINF_SUCCESS;
4545
4546 /* Patch hints are not enabled by default. Only when they are actually encountered. */
4547 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4548 {
4549 rc = PATMR3DisablePatch(pVM, pInstrGC);
4550 AssertRCReturn(rc, rc);
4551 }
4552
4553#ifdef VBOX_WITH_STATISTICS
4554 /* Register statistics counter */
4555 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4556 {
4557 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4558 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4559#ifndef DEBUG_sandervl
4560 /* Full breakdown for the GUI. */
4561 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4562 "/PATM/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4563 STAMR3RegisterF(pVM, &pPatchRec->patch.pPatchBlockOffset,STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/offPatchBlock", pPatchRec->patch.pPrivInstrGC);
4564 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4565 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4566 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4567 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4568 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4569 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4570 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4571 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4572 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4573 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4574 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4575 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4576 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4577 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4578#endif
4579 }
4580#endif
4581
4582 /* Add debug symbol. */
4583 patmR3DbgAddPatch(pVM, pPatchRec);
4584 }
4585 /* Free leftover lock if any. */
4586 if (cacheRec.Lock.pvMap)
4587 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4588 return rc;
4589}
4590
4591/**
4592 * Query instruction size
4593 *
4594 * @returns VBox status code.
4595 * @param pVM Pointer to the VM.
4596 * @param pPatch Patch record
4597 * @param pInstrGC Instruction address
4598 */
4599static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4600{
4601 uint8_t *pInstrHC;
4602 PGMPAGEMAPLOCK Lock;
4603
4604 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4605 if (rc == VINF_SUCCESS)
4606 {
4607 DISCPUSTATE cpu;
4608 bool disret;
4609 uint32_t cbInstr;
4610
4611 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4612 PGMPhysReleasePageMappingLock(pVM, &Lock);
4613 if (disret)
4614 return cbInstr;
4615 }
4616 return 0;
4617}
4618
4619/**
4620 * Add patch to page record
4621 *
4622 * @returns VBox status code.
4623 * @param pVM Pointer to the VM.
4624 * @param pPage Page address
4625 * @param pPatch Patch record
4626 */
4627int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4628{
4629 PPATMPATCHPAGE pPatchPage;
4630 int rc;
4631
4632 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4633
4634 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4635 if (pPatchPage)
4636 {
4637 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4638 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4639 {
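/* The per-page array of patch pointers is full; grow it by PATMPATCHPAGE_PREALLOC_INCREMENT entries and copy the old entries over. */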
4640 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4641 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4642
4643 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4644 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4645 (void **)&pPatchPage->papPatch);
4646 if (RT_FAILURE(rc))
4647 {
4648 Log(("Out of memory!!!!\n"));
4649 return VERR_NO_MEMORY;
4650 }
4651 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4652 MMHyperFree(pVM, papPatchOld);
4653 }
4654 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4655 pPatchPage->cCount++;
4656 }
4657 else
4658 {
4659 bool fInserted;
4660
4661 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4662 if (RT_FAILURE(rc))
4663 {
4664 Log(("Out of memory!!!!\n"));
4665 return VERR_NO_MEMORY;
4666 }
4667 pPatchPage->Core.Key = pPage;
4668 pPatchPage->cCount = 1;
4669 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4670
4671 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4672 (void **)&pPatchPage->papPatch);
4673 if (RT_FAILURE(rc))
4674 {
4675 Log(("Out of memory!!!!\n"));
4676 MMHyperFree(pVM, pPatchPage);
4677 return VERR_NO_MEMORY;
4678 }
4679 pPatchPage->papPatch[0] = pPatch;
4680
4681 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4682 Assert(fInserted);
4683 pVM->patm.s.cPageRecords++;
4684
4685 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4686 }
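/* Ask CSAM to monitor the page so that guest writes to the patched code get detected (see PATMR3PatchWrite). */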
4687 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4688
4689 /* Get the closest guest instruction (from below) */
4690 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4691 Assert(pGuestToPatchRec);
4692 if (pGuestToPatchRec)
4693 {
4694 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4695 if ( pPatchPage->pLowestAddrGC == 0
4696 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4697 {
4698 RTRCUINTPTR offset;
4699
4700 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4701
4702 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4703 /* If we're too close to the page boundary, then make sure an
4704 instruction from the previous page doesn't cross the
4705 boundary itself. */
4706 if (offset && offset < MAX_INSTR_SIZE)
4707 {
4708 /* Get the closest guest instruction (from above) */
4709 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4710
4711 if (pGuestToPatchRec)
4712 {
4713 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4714 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4715 {
4716 pPatchPage->pLowestAddrGC = pPage;
4717 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4718 }
4719 }
4720 }
4721 }
4722 }
4723
4724 /* Get the closest guest instruction (from above) */
4725 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4726 Assert(pGuestToPatchRec);
4727 if (pGuestToPatchRec)
4728 {
4729 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4730 if ( pPatchPage->pHighestAddrGC == 0
4731 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4732 {
4733 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4734 /* Increase by instruction size. */
4735 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4736//// Assert(size);
4737 pPatchPage->pHighestAddrGC += size;
4738 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4739 }
4740 }
4741
4742 return VINF_SUCCESS;
4743}
4744
4745/**
4746 * Remove patch from page record
4747 *
4748 * @returns VBox status code.
4749 * @param pVM Pointer to the VM.
4750 * @param pPage Page address
4751 * @param pPatch Patch record
4752 */
4753int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4754{
4755 PPATMPATCHPAGE pPatchPage;
4756 int rc;
4757
4758 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4759 Assert(pPatchPage);
4760
4761 if (!pPatchPage)
4762 return VERR_INVALID_PARAMETER;
4763
4764 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4765
4766 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4767 if (pPatchPage->cCount > 1)
4768 {
4769 uint32_t i;
4770
4771 /* Used by multiple patches */
4772 for (i = 0; i < pPatchPage->cCount; i++)
4773 {
4774 if (pPatchPage->papPatch[i] == pPatch)
4775 {
4776 /* close the gap between the remaining pointers. */
4777 uint32_t cNew = --pPatchPage->cCount;
4778 if (i < cNew)
4779 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4780 pPatchPage->papPatch[cNew] = NULL;
4781 return VINF_SUCCESS;
4782 }
4783 }
4784 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4785 }
4786 else
4787 {
4788 PPATMPATCHPAGE pPatchNode;
4789
4790 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4791
4792 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4793 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4794 Assert(pPatchNode && pPatchNode == pPatchPage);
4795
4796 Assert(pPatchPage->papPatch);
4797 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4798 AssertRC(rc);
4799 rc = MMHyperFree(pVM, pPatchPage);
4800 AssertRC(rc);
4801 pVM->patm.s.cPageRecords--;
4802 }
4803 return VINF_SUCCESS;
4804}
4805
4806/**
4807 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4808 *
4809 * @returns VBox status code.
4810 * @param pVM Pointer to the VM.
4811 * @param pPatch Patch record
4812 */
4813int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4814{
4815 int rc;
4816 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4817
4818 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4819 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4820 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4821
4822 /** @todo optimize better (large gaps between current and next used page) */
4823 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4824 {
4825 /* Get the closest guest instruction (from above) */
4826 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4827 if ( pGuestToPatchRec
4828 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4829 )
4830 {
4831 /* Code in page really patched -> add record */
4832 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4833 AssertRC(rc);
4834 }
4835 }
4836 pPatch->flags |= PATMFL_CODE_MONITORED;
4837 return VINF_SUCCESS;
4838}
4839
4840/**
4841 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4842 *
4843 * @returns VBox status code.
4844 * @param pVM Pointer to the VM.
4845 * @param pPatch Patch record
4846 */
4847static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4848{
4849 int rc;
4850 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4851
4852 /* Remove the pages that contain patched instructions from the lookup tree used for detecting self-modifying code. */
4853 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4854 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4855
4856 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4857 {
4858 /* Get the closest guest instruction (from above) */
4859 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4860 if ( pGuestToPatchRec
4861 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4862 )
4863 {
4864 /* Code in page really patched -> remove record */
4865 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4866 AssertRC(rc);
4867 }
4868 }
4869 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4870 return VINF_SUCCESS;
4871}
4872
4873/**
4874 * Notifies PATM about a (potential) write to code that has been patched.
4875 *
4876 * @returns VBox status code.
4877 * @param pVM Pointer to the VM.
4878 * @param GCPtr GC pointer to the write address
4879 * @param cbWrite Number of bytes to write
4880 *
4881 */
4882VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4883{
4884 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4885
4886 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4887
4888 Assert(VM_IS_EMT(pVM));
4889 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4890
4891 /* Quick boundary check */
4892 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4893 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4894 )
4895 return VINF_SUCCESS;
4896
4897 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4898
4899 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4900 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4901
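/* Walk every guest page touched by the write; a single write may straddle a page boundary. */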
4902 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4903 {
4904loop_start:
4905 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4906 if (pPatchPage)
4907 {
4908 uint32_t i;
4909 bool fValidPatchWrite = false;
4910
4911 /* Quick check to see if the write is in the patched part of the page */
4912 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4913 || pPatchPage->pHighestAddrGC < GCPtr)
4914 {
4915 break;
4916 }
4917
4918 for (i=0;i<pPatchPage->cCount;i++)
4919 {
4920 if (pPatchPage->papPatch[i])
4921 {
4922 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4923 RTRCPTR pPatchInstrGC;
4924 //unused: bool fForceBreak = false;
4925
4926 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4927 /** @todo inefficient and includes redundant checks for multiple pages. */
4928 for (uint32_t j=0; j<cbWrite; j++)
4929 {
4930 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4931
4932 if ( pPatch->cbPatchJump
4933 && pGuestPtrGC >= pPatch->pPrivInstrGC
4934 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4935 {
4936 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4937 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4938 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4939 if (rc == VINF_SUCCESS)
4940 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4941 goto loop_start;
4942
4943 continue;
4944 }
4945
4946 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4947 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4948 if (!pPatchInstrGC)
4949 {
4950 RTRCPTR pClosestInstrGC;
4951 uint32_t size;
4952
4953 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4954 if (pPatchInstrGC)
4955 {
4956 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4957 Assert(pClosestInstrGC <= pGuestPtrGC);
4958 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4959 /* Check if this is not a write into a gap between two patches */
4960 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4961 pPatchInstrGC = 0;
4962 }
4963 }
4964 if (pPatchInstrGC)
4965 {
4966 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4967
4968 fValidPatchWrite = true;
4969
4970 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4971 Assert(pPatchToGuestRec);
4972 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4973 {
4974 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4975
4976 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4977 {
4978 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4979
4980 patmR3MarkDirtyPatch(pVM, pPatch);
4981
4982 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4983 goto loop_start;
4984 }
4985 else
4986 {
4987 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4988 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4989
4990 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4991 pPatchToGuestRec->fDirty = true;
4992
4993 *pInstrHC = 0xCC;
4994
4995 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4996 }
4997 }
4998 /* else already marked dirty */
4999 }
5000 }
5001 }
5002 } /* for each patch */
5003
5004 if (fValidPatchWrite == false)
5005 {
5006 /* Write to a part of the page that either:
5007 * - doesn't contain any code (shared code/data); rather unlikely
5008 * - belongs to an old code page that's no longer in active use.
5009 */
5010invalid_write_loop_start:
5011 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5012
5013 if (pPatchPage)
5014 {
5015 for (i=0;i<pPatchPage->cCount;i++)
5016 {
5017 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5018
5019 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5020 {
5021 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5022 if (pPatch->flags & PATMFL_IDTHANDLER)
5023 {
5024 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5025
5026 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5027 int rc = patmRemovePatchPages(pVM, pPatch);
5028 AssertRC(rc);
5029 }
5030 else
5031 {
5032 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5033 patmR3MarkDirtyPatch(pVM, pPatch);
5034 }
5035 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5036 goto invalid_write_loop_start;
5037 }
5038 } /* for */
5039 }
5040 }
5041 }
5042 }
5043 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5044 return VINF_SUCCESS;
5045
5046}
5047
5048/**
5049 * Disable all patches in a flushed page
5050 *
5051 * @returns VBox status code
5052 * @param pVM Pointer to the VM.
5053 * @param addr GC address of the page to flush
5054 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5055 * having to double check if the physical address has changed
5056 */
5057VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5058{
5059 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5060
5061 addr &= PAGE_BASE_GC_MASK;
5062
5063 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5064 if (pPatchPage)
5065 {
5066 int i;
5067
5068 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
5069 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5070 {
5071 if (pPatchPage->papPatch[i])
5072 {
5073 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5074
5075 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5076 patmR3MarkDirtyPatch(pVM, pPatch);
5077 }
5078 }
5079 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5080 }
5081 return VINF_SUCCESS;
5082}
5083
5084/**
5085 * Checks whether the instruction at the specified address has already been patched.
5086 *
5087 * @returns boolean, patched or not
5088 * @param pVM Pointer to the VM.
5089 * @param pInstrGC Guest context pointer to instruction
5090 */
5091VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5092{
5093 Assert(!HMIsEnabled(pVM));
5094 PPATMPATCHREC pPatchRec;
5095 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5096 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5097 return true;
5098 return false;
5099}
5100
5101/**
5102 * Query the opcode of the original code that was overwritten by the 5-byte patch jump
5103 *
5104 * @returns VBox status code.
5105 * @param pVM Pointer to the VM.
5106 * @param pInstrGC GC address of instr
5107 * @param pByte opcode byte pointer (OUT)
5108 *
5109 */
5110VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5111{
5112 PPATMPATCHREC pPatchRec;
5113
5114 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5115
5116 /* Shortcut. */
5117 if (!PATMIsEnabled(pVM))
5118 return VERR_PATCH_NOT_FOUND;
5119 Assert(!HMIsEnabled(pVM));
5120 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5121 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5122 return VERR_PATCH_NOT_FOUND;
5123
5124 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5125 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5126 if ( pPatchRec
5127 && pPatchRec->patch.uState == PATCH_ENABLED
5128 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5129 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5130 {
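/* aPrivInstr holds a copy of the original guest bytes that the patch jump overwrote. */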
5131 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5132 *pByte = pPatchRec->patch.aPrivInstr[offset];
5133
5134 if (pPatchRec->patch.cbPatchJump == 1)
5135 {
5136 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5137 }
5138 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5139 return VINF_SUCCESS;
5140 }
5141 return VERR_PATCH_NOT_FOUND;
5142}
5143
5144/**
5145 * Read instruction bytes of the original code that was overwritten by the
5146 * 5-byte patch jump.
5147 *
5148 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5149 * @param pVM Pointer to the VM.
5150 * @param GCPtrInstr GC address of instr
5151 * @param pbDst The output buffer.
5152 * @param cbToRead The maximum number bytes to read.
5153 * @param pcbRead Where to return the actual number of bytes read.
5154 */
5155VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5156{
5157 /* Shortcut. */
5158 if (!PATMIsEnabled(pVM))
5159 return VERR_PATCH_NOT_FOUND;
5160 Assert(!HMIsEnabled(pVM));
5161 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5162 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5163 return VERR_PATCH_NOT_FOUND;
5164
5165 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5166
5167 /*
5168 * If the patch is enabled and the pointer lies within 5 bytes of this
5169 * priv instr ptr, then we've got a hit!
5170 */
5171 RTGCPTR32 off;
5172 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5173 GCPtrInstr, false /*fAbove*/);
5174 if ( pPatchRec
5175 && pPatchRec->patch.uState == PATCH_ENABLED
5176 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5177 {
5178 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5179 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5180 if (cbToRead > cbMax)
5181 cbToRead = cbMax;
5182 switch (cbToRead)
5183 {
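/* Deliberate fall-through: copy up to five bytes one at a time for the common small sizes. */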
5184 case 5: pbDst[4] = pbSrc[4];
5185 case 4: pbDst[3] = pbSrc[3];
5186 case 3: pbDst[2] = pbSrc[2];
5187 case 2: pbDst[1] = pbSrc[1];
5188 case 1: pbDst[0] = pbSrc[0];
5189 break;
5190 default:
5191 memcpy(pbDst, pbSrc, cbToRead);
5192 }
5193 *pcbRead = cbToRead;
5194
5195 if (pPatchRec->patch.cbPatchJump == 1)
5196 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5197 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5198 return VINF_SUCCESS;
5199 }
5200
5201 return VERR_PATCH_NOT_FOUND;
5202}
5203
5204/**
5205 * Disable patch for privileged instruction at specified location
5206 *
5207 * @returns VBox status code.
5208 * @param pVM Pointer to the VM.
5209 * @param pInstrGC Guest context pointer to the privileged instruction
5210 *
5211 * @note returns failure if patching is not allowed or possible
5212 *
5213 */
5214VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5215{
5216 PPATMPATCHREC pPatchRec;
5217 PPATCHINFO pPatch;
5218
5219 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5220 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5221 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5222 if (pPatchRec)
5223 {
5224 int rc = VINF_SUCCESS;
5225
5226 pPatch = &pPatchRec->patch;
5227
5228 /* Already disabled? */
5229 if (pPatch->uState == PATCH_DISABLED)
5230 return VINF_SUCCESS;
5231
5232 /* Clear the IDT entries for the patch we're disabling. */
5233 /* Note: very important as we clear IF in the patch itself */
5234 /** @todo this needs to be changed */
5235 if (pPatch->flags & PATMFL_IDTHANDLER)
5236 {
5237 uint32_t iGate;
5238
5239 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5240 if (iGate != (uint32_t)~0)
5241 {
5242 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
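/* Throttle the release log: only the first couple of hundred disabled IDT handlers are logged. */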
5243 if (++cIDTHandlersDisabled < 256)
5244 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5245 }
5246 }
5247
5248 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, trampoline or IDT patch). */
5249 if ( pPatch->pPatchBlockOffset
5250 && pPatch->uState == PATCH_ENABLED)
5251 {
5252 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5253 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5254 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5255 }
5256
5257 /* IDT or function patches haven't changed any guest code. */
5258 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5259 {
5260 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5261 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5262
5263 if (pPatch->uState != PATCH_REFUSED)
5264 {
5265 uint8_t temp[16];
5266
5267 Assert(pPatch->cbPatchJump < sizeof(temp));
5268
5269 /* Let's first check if the guest code is still the same. */
5270 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5271 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5272 if (rc == VINF_SUCCESS)
5273 {
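/* Displacement the original 5-byte near jump should contain, measured from the end of the jump instruction. */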
5274 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5275
5276 if ( temp[0] != 0xE9 /* jmp opcode */
5277 || *(RTRCINTPTR *)(&temp[1]) != displ
5278 )
5279 {
5280 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5281 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5282 /* Remove it completely */
5283 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5284 rc = PATMR3RemovePatch(pVM, pInstrGC);
5285 AssertRC(rc);
5286 return VWRN_PATCH_REMOVED;
5287 }
5288 patmRemoveJumpToPatch(pVM, pPatch);
5289 }
5290 else
5291 {
5292 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5293 pPatch->uState = PATCH_DISABLE_PENDING;
5294 }
5295 }
5296 else
5297 {
5298 AssertMsgFailed(("Patch was refused!\n"));
5299 return VERR_PATCH_ALREADY_DISABLED;
5300 }
5301 }
5302 else
5303 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5304 {
5305 uint8_t temp[16];
5306
5307 Assert(pPatch->cbPatchJump < sizeof(temp));
5308
5309 /* Let's first check if the guest code is still the same. */
5310 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5311 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5312 if (rc == VINF_SUCCESS)
5313 {
5314 if (temp[0] != 0xCC)
5315 {
5316 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5317 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5318 /* Remove it completely */
5319 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5320 rc = PATMR3RemovePatch(pVM, pInstrGC);
5321 AssertRC(rc);
5322 return VWRN_PATCH_REMOVED;
5323 }
5324 patmDeactivateInt3Patch(pVM, pPatch);
5325 }
5326 }
5327
5328 if (rc == VINF_SUCCESS)
5329 {
5330 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5331 if (pPatch->uState == PATCH_DISABLE_PENDING)
5332 {
5333 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5334 pPatch->uState = PATCH_UNUSABLE;
5335 }
5336 else
5337 if (pPatch->uState != PATCH_DIRTY)
5338 {
5339 pPatch->uOldState = pPatch->uState;
5340 pPatch->uState = PATCH_DISABLED;
5341 }
5342 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5343 }
5344
5345 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5346 return VINF_SUCCESS;
5347 }
5348 Log(("Patch not found!\n"));
5349 return VERR_PATCH_NOT_FOUND;
5350}
5351
5352/**
5353 * Permanently disable patch for privileged instruction at specified location
5354 *
5355 * @returns VBox status code.
5356 * @param pVM Pointer to the VM.
5357 * @param pInstrGC Guest context instruction pointer
5358 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5359 * @param pConflictPatch Conflicting patch
5360 *
5361 */
5362static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5363{
5364 NOREF(pConflictAddr);
5365#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5366 PATCHINFO patch;
5367 DISCPUSTATE cpu;
5368 R3PTRTYPE(uint8_t *) pInstrHC;
5369 uint32_t cbInstr;
5370 bool disret;
5371 int rc;
5372
5373 RT_ZERO(patch);
5374 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5375 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5376 /*
5377 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5378 * with one that jumps right into the conflict patch.
5379 * Otherwise we must disable the conflicting patch to avoid serious problems.
5380 */
5381 if ( disret == true
5382 && (pConflictPatch->flags & PATMFL_CODE32)
5383 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5384 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5385 {
5386 /* Hint patches must be enabled first. */
5387 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5388 {
5389 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5390 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5391 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5392 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5393 /* Enabling might fail if the patched code has changed in the meantime. */
5394 if (rc != VINF_SUCCESS)
5395 return rc;
5396 }
5397
5398 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5399 if (RT_SUCCESS(rc))
5400 {
5401 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5402 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5403 return VINF_SUCCESS;
5404 }
5405 }
5406#endif
5407
5408 if (pConflictPatch->opcode == OP_CLI)
5409 {
5410 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5411 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5412 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5413 if (rc == VWRN_PATCH_REMOVED)
5414 return VINF_SUCCESS;
5415 if (RT_SUCCESS(rc))
5416 {
5417 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5418 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5419 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5420 if (rc == VERR_PATCH_NOT_FOUND)
5421 return VINF_SUCCESS; /* removed already */
5422
5423 AssertRC(rc);
5424 if (RT_SUCCESS(rc))
5425 {
5426 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5427 return VINF_SUCCESS;
5428 }
5429 }
5430 /* else turned into unusable patch (see below) */
5431 }
5432 else
5433 {
5434 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5435 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5436 if (rc == VWRN_PATCH_REMOVED)
5437 return VINF_SUCCESS;
5438 }
5439
5440 /* No need to monitor the code anymore. */
5441 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5442 {
5443 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5444 AssertRC(rc);
5445 }
5446 pConflictPatch->uState = PATCH_UNUSABLE;
5447 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5448 return VERR_PATCH_DISABLED;
5449}
5450
5451/**
5452 * Enable patch for privileged instruction at specified location
5453 *
5454 * @returns VBox status code.
5455 * @param pVM Pointer to the VM.
5456 * @param pInstrGC Guest context pointer to the privileged instruction
5457 *
5458 * @note returns failure if patching is not allowed or possible
5459 *
5460 */
5461VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5462{
5463 PPATMPATCHREC pPatchRec;
5464 PPATCHINFO pPatch;
5465
5466 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5467 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5468 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5469 if (pPatchRec)
5470 {
5471 int rc = VINF_SUCCESS;
5472
5473 pPatch = &pPatchRec->patch;
5474
5475 if (pPatch->uState == PATCH_DISABLED)
5476 {
5477 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5478 {
5479 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5480 uint8_t temp[16];
5481
5482 Assert(pPatch->cbPatchJump < sizeof(temp));
5483
5484 /* Let's first check if the guest code is still the same. */
5485 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5486 AssertRC(rc2);
5487 if (rc2 == VINF_SUCCESS)
5488 {
5489 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5490 {
5491 Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
5492 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5493 /* Remove it completely */
5494 rc = PATMR3RemovePatch(pVM, pInstrGC);
5495 AssertRC(rc);
5496 return VERR_PATCH_NOT_FOUND;
5497 }
5498
5499 PATMP2GLOOKUPREC cacheRec;
5500 RT_ZERO(cacheRec);
5501 cacheRec.pPatch = pPatch;
5502
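/* Guest code still matches; re-install the 5-byte jump from the guest code into the patch block. */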
5503 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5504 /* Free leftover lock if any. */
5505 if (cacheRec.Lock.pvMap)
5506 {
5507 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5508 cacheRec.Lock.pvMap = NULL;
5509 }
5510 AssertRC(rc2);
5511 if (RT_FAILURE(rc2))
5512 return rc2;
5513
5514#ifdef DEBUG
5515 {
5516 DISCPUSTATE cpu;
5517 char szOutput[256];
5518 uint32_t cbInstr;
5519 uint32_t i = 0;
5520 bool disret;
5521 while(i < pPatch->cbPatchJump)
5522 {
5523 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5524 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5525 Log(("Renewed patch instr: %s", szOutput));
5526 i += cbInstr;
5527 }
5528 }
5529#endif
5530 }
5531 }
5532 else
5533 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5534 {
5535 uint8_t temp[16];
5536
5537 Assert(pPatch->cbPatchJump < sizeof(temp));
5538
5539 /* Let's first check if the guest code is still the same. */
5540 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5541 AssertRC(rc2);
5542
5543 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5544 {
5545 Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
5546 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5547 rc = PATMR3RemovePatch(pVM, pInstrGC);
5548 AssertRC(rc);
5549 return VERR_PATCH_NOT_FOUND;
5550 }
5551
5552 rc2 = patmActivateInt3Patch(pVM, pPatch);
5553 if (RT_FAILURE(rc2))
5554 return rc2;
5555 }
5556
5557 pPatch->uState = pPatch->uOldState; //restore state
5558
5559 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5560 if (pPatch->pPatchBlockOffset)
5561 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5562
5563 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5564 }
5565 else
5566 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5567
5568 return rc;
5569 }
5570 return VERR_PATCH_NOT_FOUND;
5571}
5572
5573/**
5574 * Remove patch for privileged instruction at specified location
5575 *
5576 * @returns VBox status code.
5577 * @param pVM Pointer to the VM.
5578 * @param pPatchRec Patch record
5579 * @param fForceRemove Remove *all* patches
5580 */
5581int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5582{
5583 PPATCHINFO pPatch;
5584
5585 pPatch = &pPatchRec->patch;
5586
5587 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5588 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5589 {
5590 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5591 return VERR_ACCESS_DENIED;
5592 }
5593 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5594
5595 /* Note: NEVER EVER REUSE PATCH MEMORY */
5596 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5597
5598 if (pPatchRec->patch.pPatchBlockOffset)
5599 {
5600 PAVLOU32NODECORE pNode;
5601
5602 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5603 Assert(pNode);
5604 }
5605
5606 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5607 {
5608 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5609 AssertRC(rc);
5610 }
5611
5612#ifdef VBOX_WITH_STATISTICS
5613 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5614 {
5615 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5616 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5617 }
5618#endif
5619
5620 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5621 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5622 pPatch->nrPatch2GuestRecs = 0;
5623 Assert(pPatch->Patch2GuestAddrTree == 0);
5624
5625 patmEmptyTree(pVM, &pPatch->FixupTree);
5626 pPatch->nrFixups = 0;
5627 Assert(pPatch->FixupTree == 0);
5628
5629 if (pPatchRec->patch.pTempInfo)
5630 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5631
5632 /* Note: might fail, because it has already been removed (e.g. during reset). */
5633 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5634
5635 /* Free the patch record */
5636 MMHyperFree(pVM, pPatchRec);
5637 return VINF_SUCCESS;
5638}
5639
5640/**
5641 * RTAvlU32DoWithAll() worker.
5642 * Checks whether the current trampoline instruction is the jump to the target patch
5643 * and updates the displacement to jump to the new target.
5644 *
5645 * @returns VBox status code.
5646 * @retval VERR_ALREADY_EXISTS if the jump was found.
5647 * @param pNode The current patch to guest record to check.
5648 * @param pvUser The refresh state.
5649 */
5650static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5651{
5652 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5653 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5654 PVM pVM = pRefreshPatchState->pVM;
5655
5656 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5657
5658 /*
5659 * Check if the patch instruction starts with a jump.
5660 * ASSUMES that there is no other patch to guest record that starts
5661 * with a jump.
5662 */
5663 if (*pPatchInstr == 0xE9)
5664 {
5665 /* Jump found, update the displacement. */
5666 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5667 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
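/* The jump displacement is relative to the byte following the 5-byte near jump in the trampoline. */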
5668 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5669
5670 LogFlow(("Updating trampoline patch new patch target %RRv, new displacment %d (old was %d)\n",
5671 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5672
5673 *(uint32_t *)&pPatchInstr[1] = displ;
5674 return VERR_ALREADY_EXISTS; /** @todo better return code */
5675 }
5676
5677 return VINF_SUCCESS;
5678}
5679
5680/**
5681 * Attempt to refresh the patch by recompiling its entire code block
5682 *
5683 * @returns VBox status code.
5684 * @param pVM Pointer to the VM.
5685 * @param pPatchRec Patch record
5686 */
5687int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5688{
5689 PPATCHINFO pPatch;
5690 int rc;
5691 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5692 PTRAMPREC pTrampolinePatchesHead = NULL;
5693
5694 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5695
5696 pPatch = &pPatchRec->patch;
5697 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5698 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5699 {
5700 if (!pPatch->pTrampolinePatchesHead)
5701 {
5702 /*
5703 * It is sometimes possible that trampoline patches to this patch exist
5704 * but are not recorded (e.g. after loading a saved state).
5705 * Refuse to refresh such patches.
5706 * In theory this can hurt performance if the guest modifies the patched code
5707 * and executes it often. In practice, however, states are usually saved after
5708 * the guest code was modified and the code isn't touched again afterwards,
5709 * so this shouldn't be a big problem.
5710 */
5711 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5712 return VERR_PATCHING_REFUSED;
5713 }
5714 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5715 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5716 }
5717
5718 /* Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5719
5720 rc = PATMR3DisablePatch(pVM, pInstrGC);
5721 AssertRC(rc);
5722
5723 /* Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5724 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5725#ifdef VBOX_WITH_STATISTICS
5726 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5727 {
5728 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5729 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5730 }
5731#endif
5732
5733 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5734
5735 /* Attempt to install a new patch. */
5736 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5737 if (RT_SUCCESS(rc))
5738 {
5739 RTRCPTR pPatchTargetGC;
5740 PPATMPATCHREC pNewPatchRec;
5741
5742 /* Determine target address in new patch */
5743 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5744 Assert(pPatchTargetGC);
5745 if (!pPatchTargetGC)
5746 {
5747 rc = VERR_PATCHING_REFUSED;
5748 goto failure;
5749 }
5750
5751 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5752 pPatch->uCurPatchOffset = 0;
5753
5754 /* insert jump to new patch in old patch block */
5755 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5756 if (RT_FAILURE(rc))
5757 goto failure;
5758
5759 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5760 Assert(pNewPatchRec); /* can't fail */
5761
5762 /* Remove old patch (only do that when everything is finished) */
5763 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5764 AssertRC(rc2);
5765
5766 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5767 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5768 Assert(fInserted); NOREF(fInserted);
5769
5770 Log(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
5771 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5772
5773 /* Used by another patch, so don't remove it! */
5774 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5775
5776 if (pTrampolinePatchesHead)
5777 {
5778 /* Update all trampoline patches to jump to the new patch. */
5779 PTRAMPREC pTrampRec = NULL;
5780 PATMREFRESHPATCH RefreshPatch;
5781
5782 RefreshPatch.pVM = pVM;
5783 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5784
5785 pTrampRec = pTrampolinePatchesHead;
5786
5787 while (pTrampRec)
5788 {
5789 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5790
5791 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5792 /*
5793 * We have to find the right patch2guest record because there might be others
5794 * for statistics.
5795 */
5796 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5797 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5798 Assert(rc == VERR_ALREADY_EXISTS);
5799 rc = VINF_SUCCESS;
5800 pTrampRec = pTrampRec->pNext;
5801 }
5802 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5803 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5804 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5805 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5806 }
5807 }
5808
5809failure:
5810 if (RT_FAILURE(rc))
5811 {
5812 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactiving old one. \n", pInstrGC));
5813
5814 /* Remove the new inactive patch */
5815 rc = PATMR3RemovePatch(pVM, pInstrGC);
5816 AssertRC(rc);
5817
5818 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5819 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5820 Assert(fInserted); NOREF(fInserted);
5821
5822 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5823 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5824 AssertRC(rc2);
5825
5826 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5827 }
5828 return rc;
5829}
5830
5831/**
5832 * Find patch for privileged instruction at specified location
5833 *
5834 * @returns Patch structure pointer if found; else NULL
5835 * @param pVM Pointer to the VM.
5836 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5837 * @param fIncludeHints Include hinted patches or not
5838 *
5839 */
5840PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5841{
5842 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5843 /* If the patch is enabled, the pointer is not identical to the privileged patch ptr, and it lies within 5 bytes of that ptr, then we've got a hit! */
5844 if (pPatchRec)
5845 {
5846 if ( pPatchRec->patch.uState == PATCH_ENABLED
5847 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5848 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5849 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5850 {
5851 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5852 return &pPatchRec->patch;
5853 }
5854 else
5855 if ( fIncludeHints
5856 && pPatchRec->patch.uState == PATCH_DISABLED
5857 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5858 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5859 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5860 {
5861 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5862 return &pPatchRec->patch;
5863 }
5864 }
5865 return NULL;
5866}
5867
5868/**
5869 * Checks whether the GC address is inside a generated patch jump
5870 *
5871 * @returns true -> yes, false -> no
5872 * @param pVM Pointer to the VM.
5873 * @param pAddr Guest context address.
5874 * @param pPatchAddr Where to store the guest context patch address (only valid when returning true).
5875 */
5876VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5877{
5878 RTRCPTR addr;
5879 PPATCHINFO pPatch;
5880
5881 Assert(!HMIsEnabled(pVM));
5882 if (PATMIsEnabled(pVM) == false)
5883 return false;
5884
5885 if (pPatchAddr == NULL)
5886 pPatchAddr = &addr;
5887
5888 *pPatchAddr = 0;
5889
5890 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5891 if (pPatch)
5892 *pPatchAddr = pPatch->pPrivInstrGC;
5893
5894 return *pPatchAddr == 0 ? false : true;
5895}
5896
5897/**
5898 * Remove patch for privileged instruction at specified location
5899 *
5900 * @returns VBox status code.
5901 * @param pVM Pointer to the VM.
5902 * @param pInstrGC Guest context pointer to the privileged instruction
5903 *
5904 * @note returns failure if patching is not allowed or possible
5905 *
5906 */
5907VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5908{
5909 PPATMPATCHREC pPatchRec;
5910
5911 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5912 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5913 if (pPatchRec)
5914 {
5915 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5916 if (rc == VWRN_PATCH_REMOVED)
5917 return VINF_SUCCESS;
5918
5919 return patmR3RemovePatch(pVM, pPatchRec, false);
5920 }
5921 AssertFailed();
5922 return VERR_PATCH_NOT_FOUND;
5923}
5924
5925/**
5926 * Mark patch as dirty
5927 *
5928 * @returns VBox status code.
5929 * @param pVM Pointer to the VM.
5930 * @param pPatch Patch record
5931 *
5932 * @note returns failure if patching is not allowed or possible
5933 *
5934 */
5935static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5936{
5937 if (pPatch->pPatchBlockOffset)
5938 {
5939 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5940 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5941 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5942 }
5943
5944 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5945 /* Put back the replaced instruction. */
5946 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5947 if (rc == VWRN_PATCH_REMOVED)
5948 return VINF_SUCCESS;
5949
5950 /* Note: we don't restore patch pages for patches that are not enabled! */
5951 /* Note: be careful when changing this behaviour!! */
5952
5953 /* The patch pages are no longer marked for self-modifying code detection */
5954 if (pPatch->flags & PATMFL_CODE_MONITORED)
5955 {
5956 rc = patmRemovePatchPages(pVM, pPatch);
5957 AssertRCReturn(rc, rc);
5958 }
5959 pPatch->uState = PATCH_DIRTY;
5960
5961 /* Paranoia: this patch might still be somewhere in the call chain, so reset the PATM stack to prevent ret instructions from succeeding. */
5962 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5963
5964 return VINF_SUCCESS;
5965}
5966
5967/**
5968 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5969 *
5970 * @returns VBox status code.
5971 * @param pVM Pointer to the VM.
5972 * @param pPatch Patch block structure pointer
5973 * @param pPatchGC GC address in patch block
5974 */
5975RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5976{
5977 Assert(pPatch->Patch2GuestAddrTree);
5978 /* Get the closest record from below. */
5979 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5980 if (pPatchToGuestRec)
5981 return pPatchToGuestRec->pOrgInstrGC;
5982
5983 return 0;
5984}
5985
5986/**
5987 * Converts Guest code GC ptr to Patch code GC ptr (if found)
5988 *
5989 * @returns corresponding GC pointer in patch block
5990 * @param pVM Pointer to the VM.
5991 * @param pPatch Current patch block pointer
5992 * @param pInstrGC Guest context pointer to privileged instruction
5993 *
5994 */
5995RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5996{
5997 if (pPatch->Guest2PatchAddrTree)
5998 {
5999 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6000 if (pGuestToPatchRec)
6001 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6002 }
6003
6004 return 0;
6005}
6006
6007/**
6008 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6009 *
6010 * @returns corresponding GC pointer in patch block
6011 * @param pVM Pointer to the VM.
6012 * @param pInstrGC Guest context pointer to privileged instruction
6013 */
6014static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6015{
6016 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6017 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6018 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6019 return NIL_RTRCPTR;
6020}
6021
6022/**
6023 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6024 * identical match)
6025 *
6026 * @returns corresponding GC pointer in patch block
6027 * @param pVM Pointer to the VM.
6028 * @param pPatch Current patch block pointer
6029 * @param pInstrGC Guest context pointer to privileged instruction
6030 *
6031 */
6032RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6033{
6034 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6035 if (pGuestToPatchRec)
6036 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6037 return NIL_RTRCPTR;
6038}
6039
6040/**
6041 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6042 *
6043 * @returns original GC instruction pointer or 0 if not found
6044 * @param pVM Pointer to the VM.
6045 * @param pPatchGC GC address in patch block
6046 * @param pEnmState State of the translated address (out)
6047 *
6048 */
6049VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6050{
6051 PPATMPATCHREC pPatchRec;
6052 void *pvPatchCoreOffset;
6053 RTRCPTR pPrivInstrGC;
6054
6055 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6056 Assert(!HMIsEnabled(pVM));
6057 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6058 if (pvPatchCoreOffset == 0)
6059 {
6060 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6061 return 0;
6062 }
6063 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6064 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6065 if (pEnmState)
6066 {
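/* Classify the translated address so the caller knows how (and whether) it can safely resume at the original guest instruction. */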
6067 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6068 || pPatchRec->patch.uState == PATCH_DIRTY
6069 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6070 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6071 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6072
6073 if ( !pPrivInstrGC
6074 || pPatchRec->patch.uState == PATCH_UNUSABLE
6075 || pPatchRec->patch.uState == PATCH_REFUSED)
6076 {
6077 pPrivInstrGC = 0;
6078 *pEnmState = PATMTRANS_FAILED;
6079 }
6080 else
6081 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6082 {
6083 *pEnmState = PATMTRANS_INHIBITIRQ;
6084 }
6085 else
6086 if ( pPatchRec->patch.uState == PATCH_ENABLED
6087 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6088 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6089 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6090 {
6091 *pEnmState = PATMTRANS_OVERWRITTEN;
6092 }
6093 else
6094 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6095 {
6096 *pEnmState = PATMTRANS_OVERWRITTEN;
6097 }
6098 else
6099 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6100 {
6101 *pEnmState = PATMTRANS_PATCHSTART;
6102 }
6103 else
6104 *pEnmState = PATMTRANS_SAFE;
6105 }
6106 return pPrivInstrGC;
6107}
6108
6109/**
6110 * Returns the GC pointer of the patch for the specified GC address
6111 *
6112 * @returns GC pointer of the patch code, or NIL_RTRCPTR if no usable patch was found.
6113 * @param pVM Pointer to the VM.
6114 * @param pAddrGC Guest context address
6115 */
6116VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6117{
6118 PPATMPATCHREC pPatchRec;
6119
6120 Assert(!HMIsEnabled(pVM));
6121
6122 /* Find the patch record. */
6123 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6124 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6125 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6126 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6127 return NIL_RTRCPTR;
6128}
6129
6130/**
6131 * Attempt to recover dirty instructions
6132 *
6133 * @returns VBox status code.
6134 * @param pVM Pointer to the VM.
6135 * @param pCtx Pointer to the guest CPU context.
6136 * @param pPatch Patch record.
6137 * @param pPatchToGuestRec Patch to guest address record.
6138 * @param pEip GC pointer of trapping instruction.
6139 */
6140static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6141{
6142 DISCPUSTATE CpuOld, CpuNew;
6143 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6144 int rc;
6145 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6146 uint32_t cbDirty;
6147 PRECPATCHTOGUEST pRec;
6148 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6149 PVMCPU pVCpu = VMMGetCpu0(pVM);
6150 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6151
6152 pRec = pPatchToGuestRec;
6153 pCurInstrGC = pOrgInstrGC;
6154 pCurPatchInstrGC = pEip;
6155 cbDirty = 0;
6156 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6157
6158 /* Find all adjacent dirty instructions */
6159 while (true)
6160 {
6161 if (pRec->fJumpTarget)
6162 {
6163 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6164 pRec->fDirty = false;
6165 return VERR_PATCHING_REFUSED;
6166 }
6167
6168 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6169 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6170 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6171
6172 /* Only harmless instructions are acceptable. */
6173 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6174 if ( RT_FAILURE(rc)
6175 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6176 {
6177 if (RT_SUCCESS(rc))
6178 cbDirty += CpuOld.cbInstr;
6179 else
6180 if (!cbDirty)
6181 cbDirty = 1;
6182 break;
6183 }
6184
6185#ifdef DEBUG
6186 char szBuf[256];
6187 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6188 szBuf, sizeof(szBuf), NULL);
6189 Log(("DIRTY: %s\n", szBuf));
6190#endif
6191 /* Mark as clean; if we fail we'll let it always fault. */
6192 pRec->fDirty = false;
6193
6194 /* Remove old lookup record. */
6195 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6196 pPatchToGuestRec = NULL;
6197
6198 pCurPatchInstrGC += CpuOld.cbInstr;
6199 cbDirty += CpuOld.cbInstr;
6200
6201 /* Let's see if there's another dirty instruction right after. */
6202 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6203 if (!pRec || !pRec->fDirty)
6204 break; /* no more dirty instructions */
6205
6206 /* In case of complex instructions the next guest instruction could be quite far off. */
6207 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6208 }
6209
6210 if ( RT_SUCCESS(rc)
6211 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6212 )
6213 {
6214 uint32_t cbLeft;
6215
6216 pCurPatchInstrHC = pPatchInstrHC;
6217 pCurPatchInstrGC = pEip;
6218 cbLeft = cbDirty;
6219
6220 while (cbLeft && RT_SUCCESS(rc))
6221 {
6222 bool fValidInstr;
6223
6224 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6225
6226 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6227 if ( !fValidInstr
6228 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6229 )
6230 {
6231 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6232
6233 if ( pTargetGC >= pOrgInstrGC
6234 && pTargetGC <= pOrgInstrGC + cbDirty
6235 )
6236 {
6237 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6238 fValidInstr = true;
6239 }
6240 }
6241
6242 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6243 if ( rc == VINF_SUCCESS
6244 && CpuNew.cbInstr <= cbLeft /* must still fit */
6245 && fValidInstr
6246 )
6247 {
6248#ifdef DEBUG
6249 char szBuf[256];
6250 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6251 szBuf, sizeof(szBuf), NULL);
6252 Log(("NEW: %s\n", szBuf));
6253#endif
6254
6255 /* Copy the new instruction. */
6256 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6257 AssertRC(rc);
6258
6259 /* Add a new lookup record for the duplicated instruction. */
6260 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6261 }
6262 else
6263 {
6264#ifdef DEBUG
6265 char szBuf[256];
6266 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6267 szBuf, sizeof(szBuf), NULL);
6268 Log(("NEW: %s (FAILED)\n", szBuf));
6269#endif
6270 /* Restore the old lookup record for the duplicated instruction. */
6271 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6272
6273 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6274 rc = VERR_PATCHING_REFUSED;
6275 break;
6276 }
6277 pCurInstrGC += CpuNew.cbInstr;
6278 pCurPatchInstrHC += CpuNew.cbInstr;
6279 pCurPatchInstrGC += CpuNew.cbInstr;
6280 cbLeft -= CpuNew.cbInstr;
6281
6282 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6283 if (!cbLeft)
6284 {
6285 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6286 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6287 {
6288 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6289 if (pRec)
6290 {
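     /* Patch-to-guest records are keyed by the offset into patch memory, so adding pPatchMemGC
        yields the GC address of the next mapped patch instruction; anything in between is slack
        left behind by the replaced complex instruction and has to be filled below. */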
6291 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6292 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6293
6294 Assert(!pRec->fDirty);
6295
6296 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6297 if (cbFiller >= SIZEOF_NEARJUMP32)
6298 {
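     /* Emit a near jump (opcode 0xE9, rel32) over the leftover bytes. The displacement is
        relative to the end of the 5 byte jump instruction, hence cbFiller - SIZEOF_NEARJUMP32. */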
6299 pPatchFillHC[0] = 0xE9;
6300 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6301#ifdef DEBUG
6302 char szBuf[256];
6303 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6304 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6305 Log(("FILL: %s\n", szBuf));
6306#endif
6307 }
6308 else
6309 {
6310 for (unsigned i = 0; i < cbFiller; i++)
6311 {
6312 pPatchFillHC[i] = 0x90; /* NOP */
6313#ifdef DEBUG
6314 char szBuf[256];
6315 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6316 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6317 Log(("FILL: %s\n", szBuf));
6318#endif
6319 }
6320 }
6321 }
6322 }
6323 }
6324 }
6325 }
6326 else
6327 rc = VERR_PATCHING_REFUSED;
6328
6329 if (RT_SUCCESS(rc))
6330 {
6331 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6332 }
6333 else
6334 {
6335 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6336 Assert(cbDirty);
6337
6338 /* Mark the whole instruction stream with breakpoints. */
6339 if (cbDirty)
6340 memset(pPatchInstrHC, 0xCC, cbDirty);
6341
6342 if ( pVM->patm.s.fOutOfMemory == false
6343 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6344 {
6345 rc = patmR3RefreshPatch(pVM, pPatch);
6346 if (RT_FAILURE(rc))
6347 {
6348 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6349 }
6350 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6351 rc = VERR_PATCHING_REFUSED;
6352 }
6353 }
6354 return rc;
6355}
6356
6357/**
6358 * Handle trap inside patch code
6359 *
6360 * @returns VBox status code.
6361 * @param pVM Pointer to the VM.
6362 * @param pCtx Pointer to the guest CPU context.
6363 * @param pEip GC pointer of trapping instruction.
6364 * @param ppNewEip GC pointer to new instruction.
6365 */
6366VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6367{
6368 PPATMPATCHREC pPatch = 0;
6369 void *pvPatchCoreOffset;
6370 RTRCUINTPTR offset;
6371 RTRCPTR pNewEip;
6372 int rc;
6373 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6374 PVMCPU pVCpu = VMMGetCpu0(pVM);
6375
6376 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6377 Assert(pVM->cCpus == 1);
6378
6379 pNewEip = 0;
6380 *ppNewEip = 0;
6381
6382 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6383
6384 /* Find the patch record. */
6385 /* Note: there might not be a patch to guest translation record (global function) */
6386 offset = pEip - pVM->patm.s.pPatchMemGC;
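     /* PatchTreeByPatchAddr is keyed by offset into patch memory, so a best-fit (below) lookup
        returns the patch whose code block should contain pEip; the assertion further down
        double-checks that the offset really falls inside that block. */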
6387 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6388 if (pvPatchCoreOffset)
6389 {
6390 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6391
6392 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6393
6394 if (pPatch->patch.uState == PATCH_DIRTY)
6395 {
6396 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6397 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6398 {
6399 /* Function duplication patches set fPIF to 1 on entry */
6400 pVM->patm.s.pGCStateHC->fPIF = 1;
6401 }
6402 }
6403 else
6404 if (pPatch->patch.uState == PATCH_DISABLED)
6405 {
6406 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6407 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6408 {
6409 /* Function duplication patches set fPIF to 1 on entry */
6410 pVM->patm.s.pGCStateHC->fPIF = 1;
6411 }
6412 }
6413 else
6414 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6415 {
6416 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6417
6418 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6419 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6420 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6421 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6422 }
6423
6424 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6425 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6426
6427 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6428 pPatch->patch.cTraps++;
6429 PATM_STAT_FAULT_INC(&pPatch->patch);
6430 }
6431 else
6432 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6433
6434 /* Check if we were interrupted in PATM generated instruction code. */
6435 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6436 {
6437 DISCPUSTATE Cpu;
6438 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6439 AssertRC(rc);
6440
6441 if ( rc == VINF_SUCCESS
6442 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6443 || Cpu.pCurInstr->uOpcode == OP_PUSH
6444 || Cpu.pCurInstr->uOpcode == OP_CALL)
6445 )
6446 {
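     /* The pushf/push/call emitted by PATM writes to the guest stack; a fault here with fPIF
        clear is typically caused by the stack page being monitored or not yet writable in the
        shadow page tables, which the code below tries to recover from. */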
6447 uint64_t fFlags;
6448
6449 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6450
6451 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6452 {
6453 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6454 if ( rc == VINF_SUCCESS
6455 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6456 {
6457 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6458
6459 /* Reset the PATM stack. */
6460 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6461
6462 pVM->patm.s.pGCStateHC->fPIF = 1;
6463
6464 Log(("Faulting push -> go back to the original instruction\n"));
6465
6466 /* continue at the original instruction */
6467 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6468 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6469 return VINF_SUCCESS;
6470 }
6471 }
6472
6473 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6474 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6475 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6476 if (rc == VINF_SUCCESS)
6477 {
6478 /* The guest page *must* be present. */
6479 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6480 if ( rc == VINF_SUCCESS
6481 && (fFlags & X86_PTE_P))
6482 {
6483 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6484 return VINF_PATCH_CONTINUE;
6485 }
6486 }
6487 }
6488 else
6489 if (pPatch->patch.pPrivInstrGC == pNewEip)
6490 {
6491 /* Invalidated patch or first instruction overwritten.
6492 * We can ignore the fPIF state in this case.
6493 */
6494 /* Reset the PATM stack. */
6495 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6496
6497 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6498
6499 pVM->patm.s.pGCStateHC->fPIF = 1;
6500
6501 /* continue at the original instruction */
6502 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6503 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6504 return VINF_SUCCESS;
6505 }
6506
6507 char szBuf[256];
6508 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6509
6510 /* Very bad. We crashed in emitted code. Probably stack? */
6511 if (pPatch)
6512 {
6513 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6514 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n",
6515 pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags,
6516 pPatchToGuestRec->fDirty, szBuf));
6517 }
6518 else
6519 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6520 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6521 EMR3FatalError(pVCpu, VERR_PATM_IPE_TRAP_IN_PATCH_CODE);
6522 }
6523
6524 /* From here on, we must have a valid patch to guest translation. */
6525 if (pvPatchCoreOffset == 0)
6526 {
6527 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6528 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6529 return VERR_PATCH_NOT_FOUND;
6530 }
6531
6532 /* Take care of dirty/changed instructions. */
6533 if (pPatchToGuestRec->fDirty)
6534 {
6535 Assert(pPatchToGuestRec->Core.Key == offset);
6536 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6537
6538 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6539 if (RT_SUCCESS(rc))
6540 {
6541 /* Retry the current instruction. */
6542 pNewEip = pEip;
6543 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6544 }
6545 else
6546 {
6547 /* Reset the PATM stack. */
6548 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6549
6550 rc = VINF_SUCCESS; /* Continue at original instruction. */
6551 }
6552
6553 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6554 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6555 return rc;
6556 }
6557
6558#ifdef VBOX_STRICT
6559 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6560 {
6561 DISCPUSTATE cpu;
6562 bool disret;
6563 uint32_t cbInstr;
6564 PATMP2GLOOKUPREC cacheRec;
6565 RT_ZERO(cacheRec);
6566 cacheRec.pPatch = &pPatch->patch;
6567
6568 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6569 &cpu, &cbInstr);
6570 if (cacheRec.Lock.pvMap)
6571 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6572
6573 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6574 {
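     /* Strict-build diagnostics: the trapping instruction is the RETN of a duplicated function,
        so log the return address found on the guest stack against the one expected on the PATM stack. */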
6575 RTRCPTR retaddr;
6576 PCPUMCTX pCtx2;
6577
6578 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6579
6580 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6581 AssertRC(rc);
6582
6583 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6584 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6585 }
6586 }
6587#endif
6588
6589 /* Return original address, correct by subtracting the CS base address. */
6590 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6591
6592 /* Reset the PATM stack. */
6593 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6594
6595 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6596 {
6597 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6598 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6599#ifdef VBOX_STRICT
6600 DISCPUSTATE cpu;
6601 bool disret;
6602 uint32_t cbInstr;
6603 PATMP2GLOOKUPREC cacheRec;
6604 RT_ZERO(cacheRec);
6605 cacheRec.pPatch = &pPatch->patch;
6606
6607 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6608 &cpu, &cbInstr);
6609 if (cacheRec.Lock.pvMap)
6610 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6611
6612 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6613 {
6614 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6615 &cpu, &cbInstr);
6616 if (cacheRec.Lock.pvMap)
6617 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6618
6619 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6620 }
6621#endif
6622 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6623 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6624 }
6625
6626 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6627 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6628 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6629 {
6630 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6631 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6632 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6633 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6634 return VERR_PATCH_DISABLED;
6635 }
6636
6637#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6638 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6639 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6640 {
6641 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6642 //we are only wasting time, back out the patch
6643 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6644 pTrapRec->pNextPatchInstr = 0;
6645 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6646 return VERR_PATCH_DISABLED;
6647 }
6648#endif
6649
6650 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6651 return VINF_SUCCESS;
6652}
6653
6654
6655/**
6656 * Handle page-fault in monitored page
6657 *
6658 * @returns VBox status code.
6659 * @param pVM Pointer to the VM.
6660 */
6661VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6662{
6663 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6664 PVMCPU pVCpu = VMMGetCpu0(pVM);
6665
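     /* pvFaultMonitor holds the guest address that faulted in the monitored page; deregister the
        virtual handler for that page and refresh every patch whose private instruction lives on it. */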
6666 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6667 addr &= PAGE_BASE_GC_MASK;
6668
6669 int rc = PGMHandlerVirtualDeregister(pVM, pVCpu, addr, false /*fHypervisor*/);
6670 AssertRC(rc); NOREF(rc);
6671
6672 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6673 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6674 {
6675 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6676 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6677 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6678 if (rc == VWRN_PATCH_REMOVED)
6679 return VINF_SUCCESS;
6680
6681 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6682
6683 if (addr == pPatchRec->patch.pPrivInstrGC)
6684 addr++;
6685 }
6686
6687 for(;;)
6688 {
6689 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6690
6691 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6692 break;
6693
6694 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6695 {
6696 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6697 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6698 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6699 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6700 }
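     /* Advance past this patch so the next best-fit (above) lookup finds the following patch on the page. */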
6701 addr = pPatchRec->patch.pPrivInstrGC + 1;
6702 }
6703
6704 pVM->patm.s.pvFaultMonitor = 0;
6705 return VINF_SUCCESS;
6706}
6707
6708
6709#ifdef VBOX_WITH_STATISTICS
6710
6711static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6712{
6713 if (pPatch->flags & PATMFL_SYSENTER)
6714 {
6715 return "SYSENT";
6716 }
6717 else
6718 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6719 {
6720 static char szTrap[16];
6721 uint32_t iGate;
6722
6723 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6724 if (iGate < 256)
6725 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6726 else
6727 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6728 return szTrap;
6729 }
6730 else
6731 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6732 return "DUPFUNC";
6733 else
6734 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6735 return "FUNCCALL";
6736 else
6737 if (pPatch->flags & PATMFL_TRAMPOLINE)
6738 return "TRAMP";
6739 else
6740 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6741}
6742
6743static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6744{
6745 NOREF(pVM);
6746 switch(pPatch->uState)
6747 {
6748 case PATCH_ENABLED:
6749 return "ENA";
6750 case PATCH_DISABLED:
6751 return "DIS";
6752 case PATCH_DIRTY:
6753 return "DIR";
6754 case PATCH_UNUSABLE:
6755 return "UNU";
6756 case PATCH_REFUSED:
6757 return "REF";
6758 case PATCH_DISABLE_PENDING:
6759 return "DIP";
6760 default:
6761 AssertFailed();
6762 return " ";
6763 }
6764}
6765
6766/**
6767 * Resets the sample.
6768 * @param pVM Pointer to the VM.
6769 * @param pvSample The sample registered using STAMR3RegisterCallback.
6770 */
6771static void patmResetStat(PVM pVM, void *pvSample)
6772{
6773 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6774 Assert(pPatch);
6775
6776 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6777 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6778}
6779
6780/**
6781 * Prints the sample into the buffer.
6782 *
6783 * @param pVM Pointer to the VM.
6784 * @param pvSample The sample registered using STAMR3RegisterCallback.
6785 * @param pszBuf The buffer to print into.
6786 * @param cchBuf The size of the buffer.
6787 */
6788static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6789{
6790 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6791 Assert(pPatch);
6792
6793 Assert(pPatch->uState != PATCH_REFUSED);
6794 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6795
6796 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6797 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6798 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6799}
6800
6801/**
6802 * Returns the GC address of the corresponding patch statistics counter
6803 *
6804 * @returns Stat address
6805 * @param pVM Pointer to the VM.
6806 * @param pPatch Patch structure
6807 */
6808RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6809{
6810 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
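     /* Each patch owns one STAMRATIOU32 entry in the guest context statistics array; return the GC address of its u32A member. */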
6811 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6812}
6813
6814#endif /* VBOX_WITH_STATISTICS */
6815#ifdef VBOX_WITH_DEBUGGER
6816
6817/**
6818 * The '.patmoff' command.
6819 *
6820 * @returns VBox status.
6821 * @param pCmd Pointer to the command descriptor (as registered).
6822 * @param pCmdHlp Pointer to command helper functions.
6823 * @param pUVM The user mode VM handle.
6824 * @param paArgs Pointer to (readonly) array of arguments.
6825 * @param cArgs Number of arguments in the array.
6826 */
6827static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6828{
6829 /*
6830 * Validate input.
6831 */
6832 NOREF(cArgs); NOREF(paArgs);
6833 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6834 PVM pVM = pUVM->pVM;
6835 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6836
6837 if (HMIsEnabled(pVM))
6838 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6839
6840 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6841 PATMR3AllowPatching(pVM->pUVM, false);
6842 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6843}
6844
6845/**
6846 * The '.patmon' command.
6847 *
6848 * @returns VBox status.
6849 * @param pCmd Pointer to the command descriptor (as registered).
6850 * @param pCmdHlp Pointer to command helper functions.
6851 * @param pUVM The user mode VM handle.
6852 * @param paArgs Pointer to (readonly) array of arguments.
6853 * @param cArgs Number of arguments in the array.
6854 */
6855static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6856{
6857 /*
6858 * Validate input.
6859 */
6860 NOREF(cArgs); NOREF(paArgs);
6861 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6862 PVM pVM = pUVM->pVM;
6863 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6864
6865 if (HMIsEnabled(pVM))
6866 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6867
6868 PATMR3AllowPatching(pVM->pUVM, true);
6869 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6870 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6871}
6872
6873#endif /* VBOX_WITH_DEBUGGER */