VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/PATMRC.cpp @ 66227

Last change on this file since 66227 was 65637, checked in by vboxsync, 8 years ago

VMM/PATMRC: properly return with error code

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.1 KB
/* $Id: PATMRC.cpp 65637 2017-02-07 10:46:12Z vboxsync $ */
/** @file
 * PATM - Dynamic Guest OS Patching Manager - Raw-mode Context.
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                  *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/vmm/patm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include "PATMInternal.h"
#include "PATMA.h"
#include <VBox/vmm/vm.h>
#include <VBox/dbg.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>

/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      PATM all access handler callback.}
 *
 * @remarks The @a pvUser argument is the base address of the page being
 *          monitored.
 */
DECLEXPORT(VBOXSTRICTRC) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore,
                                                 RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    NOREF(pVCpu); NOREF(uErrorCode); NOREF(pCtxCore); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); RT_NOREF_PV(pvUser);

    Assert(pvUser);
    Assert(!((uintptr_t)pvUser & PAGE_OFFSET_MASK));
    Assert(((uintptr_t)pvUser + (pvFault & PAGE_OFFSET_MASK)) == pvRange + offRange);

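    /* Remember where the fault hit; the VINF_PATM_CHECK_PATCH_PAGE status below
       tells the caller to check the patch page at this address. */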
    pVM->patm.s.pvFaultMonitor = (RTRCPTR)(pvRange + offRange);
    return VINF_PATM_CHECK_PATCH_PAGE;
}


/**
 * Checks if the write is located on a page which was patched before.
 * (If so, we are not allowed to turn on r/w for the page.)
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if the access was interpreted (@a pCtxCore != NULL).
 * @retval  VINF_PGM_HANDLER_DO_DEFAULT (@a pCtxCore == NULL).
 * @retval  VINF_EM_RAW_EMULATE_INSTR when the write must be handled in ring-3.
 * @retval  VERR_PATCH_NOT_FOUND if no patch was found.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtxCore    CPU context if \#PF, NULL for other writes.
 * @param   GCPtr       GC pointer to the write address.
 * @param   cbWrite     Number of bytes to write.
 */
VMMRC_INT_DECL(VBOXSTRICTRC) PATMRCHandleWriteToPatchPage(PVM pVM, PCPUMCTXCORE pCtxCore, RTRCPTR GCPtr, uint32_t cbWrite)
{
    Assert(cbWrite > 0);

    /* Quick boundary check */
    if (   PAGE_ADDRESS(GCPtr) < PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCLowest)
        || PAGE_ADDRESS(GCPtr) > PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCHighest))
        return VERR_PATCH_NOT_FOUND;

    STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWriteDetect, a);

    /*
     * Lookup the patch page record for the write.
     */
    RTRCUINTPTR pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
    RTRCUINTPTR pWritePageEnd   = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;

    PPATMPATCHPAGE pPatchPage;
    pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.CTXSUFF(PatchLookupTree)->PatchTreeByPage, pWritePageStart);
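    /* The write may straddle a page boundary; if the first page has no record,
       also look up the page containing the last byte of the write. */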
    if (    !pPatchPage
        &&  pWritePageStart != pWritePageEnd)
        pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.CTXSUFF(PatchLookupTree)->PatchTreeByPage, pWritePageEnd);
    if (pPatchPage)
    {
        Log(("PATMGCHandleWriteToPatchPage: Found page %RRv for write to %RRv %d bytes (page low:high %RRv:%RRv\n",
             pPatchPage->Core.Key, GCPtr, cbWrite, pPatchPage->pLowestAddrGC, pPatchPage->pHighestAddrGC));
        if (   (RTRCUINTPTR)pPatchPage->pLowestAddrGC  > (RTRCUINTPTR)GCPtr + cbWrite - 1U
            || (RTRCUINTPTR)pPatchPage->pHighestAddrGC < (RTRCUINTPTR)GCPtr)
        {
            /* This part of the page was not patched; try to emulate the instruction / tell the caller to do so. */
            if (!pCtxCore)
            {
                LogFlow(("PATMHandleWriteToPatchPage: Allow writing %RRv LB %#x\n", GCPtr, cbWrite));
                STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
                STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
                return VINF_PGM_HANDLER_DO_DEFAULT;
            }
            LogFlow(("PATMHandleWriteToPatchPage: Interpret %#x accessing %RRv\n", pCtxCore->eip, GCPtr));
            int rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(VMMGetCpu0(pVM), pCtxCore, (RTGCPTR)(RTRCUINTPTR)GCPtr));
            if (rc == VINF_SUCCESS)
            {
                STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
                STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
                return VINF_SUCCESS;
            }
            STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpretedFailed);
        }
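        /* papPatch is stored as a ring-3 pointer; translate it to a raw-mode
           context address before walking the per-page patch array. */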
        R3PTRTYPE(PPATCHINFO) *paPatch = (R3PTRTYPE(PPATCHINFO) *)MMHyperR3ToRC(pVM, pPatchPage->papPatch);

        /* Increase the invalid write counter for each patch that's registered for that page. */
        for (uint32_t i = 0; i < pPatchPage->cCount; i++)
        {
            PPATCHINFO pPatch = (PPATCHINFO)MMHyperR3ToRC(pVM, paPatch[i]);

            pPatch->cInvalidWrites++;
        }

        STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
    return VERR_PATCH_NOT_FOUND;
}


/**
 * Checks if the illegal instruction was caused by a patched instruction.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtxCore    The relevant core context.
 */
VMMRC_INT_DECL(int) PATMRCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    PPATMPATCHREC pRec;
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    int rc;

    /* Very important check -> otherwise we have a security leak. */
    AssertReturn(!pCtxCore->eflags.Bits.u1VM && (pCtxCore->ss.Sel & X86_SEL_RPL) <= (EMIsRawRing1Enabled(pVM) ? 2U : 1U),
                 VERR_ACCESS_DENIED);
    Assert(PATMIsPatchGCAddr(pVM, pCtxCore->eip));

    /* OP_ILLUD2 in PATM generated code? */
    if (CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
    {
        LogFlow(("PATMRC: Pending action %x at %x\n", CTXSUFF(pVM->patm.s.pGCState)->uPendingAction, pCtxCore->eip));

        /* Private PATM interface (@todo hack due to lack of anything generic). */
        /* Parameters:
         *      eax = Pending action (currently PATM_ACTION_LOOKUP_ADDRESS)
         *      ecx = PATM_ACTION_MAGIC
         */
        if (   (pCtxCore->eax & CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
            && pCtxCore->ecx == PATM_ACTION_MAGIC
           )
        {
            CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;

            switch (pCtxCore->eax)
            {
                case PATM_ACTION_LOOKUP_ADDRESS:
                {
                    /* Parameters:
                     *      edx = GC address to find
                     *      edi = PATCHJUMPTABLE ptr
                     */
                    AssertMsg(!pCtxCore->edi || PATMIsPatchGCAddr(pVM, pCtxCore->edi), ("edi = %x\n", pCtxCore->edi));

                    Log(("PATMRC: lookup %x jump table=%x\n", pCtxCore->edx, pCtxCore->edi));

                    pRec = patmQueryFunctionPatch(pVM, (RTRCPTR)pCtxCore->edx);
                    if (pRec)
                    {
                        if (pRec->patch.uState == PATCH_ENABLED)
                        {
                            RTGCUINTPTR pRelAddr = pRec->patch.pPatchBlockOffset;   /* make it relative */
                            rc = patmAddBranchToLookupCache(pVM, (RTRCPTR)pCtxCore->edi, (RTRCPTR)pCtxCore->edx, pRelAddr);
                            if (rc == VINF_SUCCESS)
                            {
                                Log(("Patch block %RRv called as function\n", pRec->patch.pPrivInstrGC));
                                pRec->patch.flags |= PATMFL_CODE_REFERENCED;

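                                /* Skip the illegal instruction and hand the patch-block
                                   relative address back to the patch code in eax. */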
                                pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                                pCtxCore->eax = pRelAddr;
                                STAM_COUNTER_INC(&pVM->patm.s.StatFunctionFound);
                                return VINF_SUCCESS;
                            }
                            AssertFailed();
                            return rc;
                        }
                        else
                        {
                            pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pCtxCore->eax = 0;      /* make it fault */
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                            return VINF_SUCCESS;
                        }
                    }
                    else
                    {
                        /* Check first before trying to generate a function/trampoline patch. */
                        if (pVM->patm.s.fOutOfMemory)
                        {
                            pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pCtxCore->eax = 0;      /* make it fault */
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                            return VINF_SUCCESS;
                        }
                        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                        return VINF_PATM_DUPLICATE_FUNCTION;
                    }
                }

                case PATM_ACTION_DISPATCH_PENDING_IRQ:
                    /* Parameters:
                     *      edi = GC address to jump to
                     */
                    Log(("PATMRC: Dispatch pending interrupt; eip=%x->%x\n", pCtxCore->eip, pCtxCore->edi));

                    /* Change EIP to the guest address the patch would normally jump to after setting IF. */
                    pCtxCore->eip = pCtxCore->edi;

                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pCtxCore->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;

                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                    STAM_COUNTER_INC(&pVM->patm.s.StatCheckPendingIRQ);

                    /* The caller will call trpmGCExitTrap, which will dispatch pending interrupts for us. */
                    return VINF_SUCCESS;

                case PATM_ACTION_PENDING_IRQ_AFTER_IRET:
                    /* Parameters:
                     *      edi = GC address to jump to
                     */
                    Log(("PATMRC: Dispatch pending interrupt (iret); eip=%x->%x\n", pCtxCore->eip, pCtxCore->edi));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    /* Change EIP to the guest address of the iret. */
                    pCtxCore->eip = pCtxCore->edi;

                    pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pCtxCore->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;
                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                    return VINF_PATM_PENDING_IRQ_AFTER_IRET;

                case PATM_ACTION_DO_V86_IRET:
                {
                    Log(("PATMRC: Do iret to V86 code; eip=%x\n", pCtxCore->eip));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    rc = EMInterpretIretV86ForPatm(pVM, pVCpu, pCtxCore);
                    if (RT_SUCCESS(rc))
                    {
                        STAM_COUNTER_INC(&pVM->patm.s.StatEmulIret);

                        /* We are no longer executing PATM code; set PIF again. */
                        pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
                        PGMRZDynMapReleaseAutoSet(pVCpu);
                        CPUMGCCallV86Code(pCtxCore);
                        /* does not return */
                    }
                    else
                        STAM_COUNTER_INC(&pVM->patm.s.StatEmulIretFailed);
                    return rc;
                }

#ifdef DEBUG
                case PATM_ACTION_LOG_CLI:
                    Log(("PATMRC: CLI at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_STI:
                    Log(("PATMRC: STI at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_POPF_IF1:
                    Log(("PATMRC: POPF setting IF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_POPF_IF0:
                    Log(("PATMRC: POPF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_PUSHF:
                    Log(("PATMRC: PUSHF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_IF1:
                    Log(("PATMRC: IF=1 escape from %x\n", pCtxCore->eip));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_IRET:
                {
                    char *pIretFrame = (char *)pCtxCore->edx;
                    uint32_t eip, selCS, uEFlags;

                    rc  = MMGCRamRead(pVM, &eip,     pIretFrame, 4);
                    rc |= MMGCRamRead(pVM, &selCS,   pIretFrame + 4, 4);
                    rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                    if (rc == VINF_SUCCESS)
                    {
                        if (    (uEFlags & X86_EFL_VM)
                            ||  (selCS & X86_SEL_RPL) == 3)
                        {
                            uint32_t selSS, esp;

                            rc |= MMGCRamRead(pVM, &esp,   pIretFrame + 12, 4);
                            rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                            if (uEFlags & X86_EFL_VM)
                            {
                                uint32_t selDS, selES, selFS, selGS;
                                rc  = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                                rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                                rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                                rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                                if (rc == VINF_SUCCESS)
                                {
                                    Log(("PATMRC: IRET->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                                    Log(("PATMRC: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                                }
                            }
                            else
                                Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                        }
                        else
                            Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
                    }
                    Log(("PATMRC: IRET from %x (IF->1) current eflags=%x\n", pCtxCore->eip, pVM->patm.s.CTXSUFF(pGCState)->uVMFlags));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
                }

                case PATM_ACTION_LOG_GATE_ENTRY:
                {
                    char *pIretFrame = (char *)pCtxCore->edx;
                    uint32_t eip, selCS, uEFlags;

                    rc  = MMGCRamRead(pVM, &eip,     pIretFrame, 4);
                    rc |= MMGCRamRead(pVM, &selCS,   pIretFrame + 4, 4);
                    rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                    if (rc == VINF_SUCCESS)
                    {
                        if (    (uEFlags & X86_EFL_VM)
                            ||  (selCS & X86_SEL_RPL) == 3)
                        {
                            uint32_t selSS, esp;

                            rc |= MMGCRamRead(pVM, &esp,   pIretFrame + 12, 4);
                            rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                            if (uEFlags & X86_EFL_VM)
                            {
                                uint32_t selDS, selES, selFS, selGS;
                                rc  = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                                rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                                rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                                rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                                if (rc == VINF_SUCCESS)
                                {
                                    Log(("PATMRC: GATE->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                                    Log(("PATMRC: GATE->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                                }
                            }
                            else
                                Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                        }
                        else
                            Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
                    }
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
                }

                case PATM_ACTION_LOG_RET:
                    Log(("PATMRC: RET from %x to %x ESP=%x iopl=%d\n", pCtxCore->eip, pCtxCore->edx, pCtxCore->ebx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_CALL:
                    Log(("PATMRC: CALL to %RRv return addr %RRv ESP=%x iopl=%d\n", pVM->patm.s.CTXSUFF(pGCState)->GCCallPatchTargetAddr, pVM->patm.s.CTXSUFF(pGCState)->GCCallReturnAddr, pCtxCore->edx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
#endif
                default:
                    AssertFailed();
                    break;
            }
        }
        else
            AssertFailed();
        CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;
    }
    AssertMsgFailed(("Unexpected OP_ILLUD2 in patch code at %x (pending action %x)!!!!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->uPendingAction));
    return VINF_EM_RAW_EMULATE_INSTR;
}

/**
 * Checks if the int 3 was caused by a patched instruction.
 *
 * @returns Strict VBox status, including all statuses that
 *          IEMExecOneBypassWithPrefetchedByPC may return.
 * @retval  VINF_SUCCESS
 * @retval  VINF_PATM_PATCH_INT3
 * @retval  VINF_EM_RAW_EMULATE_INSTR
 * @retval  VERR_PATCH_NOT_FOUND
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtxCore    The relevant core context.
 */
VMMRC_INT_DECL(int) PATMRCHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    PPATMPATCHREC pRec;

    AssertReturn(!pCtxCore->eflags.Bits.u1VM
                 && (   (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
                     || (EMIsRawRing1Enabled(pVM) && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)), VERR_ACCESS_DENIED);

    /* Int 3 in PATM generated code? (most common case) */
    if (PATMIsPatchGCAddr(pVM, pCtxCore->eip))
    {
        /* Note! Hardcoded assumption about it being a single byte int 3 instruction. */
        pCtxCore->eip--;
        return VINF_PATM_PATCH_INT3;
    }

    /** @todo could use simple caching here to speed things up. */
    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)(pCtxCore->eip - 1)); /* eip is pointing to the instruction *after* 'int 3' already */
    if (pRec && pRec->patch.uState == PATCH_ENABLED)
    {
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT_BLOCK)
        {
            Assert(pRec->patch.opcode == OP_CLI);
            /* This is a special cli block that was turned into an int 3 patch. We jump to the generated code manually. */
            pCtxCore->eip = (uint32_t)PATCHCODE_PTR_GC(&pRec->patch);
            STAM_COUNTER_INC(&pVM->patm.s.StatInt3BlockRun);
            return VINF_SUCCESS;
        }
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT)
        {
            /* eip is pointing to the instruction *after* 'int 3' already */
            pCtxCore->eip = pCtxCore->eip - 1;

            PATM_STAT_RUN_INC(&pRec->patch);

            Log(("PATMHandleInt3PatchTrap found int3 for %s at %x\n", patmGetInstructionString(pRec->patch.opcode, 0), pCtxCore->eip));

            switch (pRec->patch.opcode)
            {
                case OP_CPUID:
                case OP_IRET:
#ifdef VBOX_WITH_RAW_RING1
                case OP_SMSW:
                case OP_MOV:        /* mov xx, CS */
#endif
                    break;

                case OP_STR:
                case OP_SGDT:
                case OP_SLDT:
                case OP_SIDT:
                case OP_LSL:
                case OP_LAR:
#ifndef VBOX_WITH_RAW_RING1
                case OP_SMSW:
#endif
                case OP_VERW:
                case OP_VERR:
                default:
                    PATM_STAT_FAULT_INC(&pRec->patch);
                    pRec->patch.cTraps++;
                    return VINF_EM_RAW_EMULATE_INSTR;
            }

            PVMCPU pVCpu = VMMGetCpu0(pVM);
            DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
            if (enmCpuMode != DISCPUMODE_32BIT)
            {
                AssertFailed();
                return VINF_EM_RAW_EMULATE_INSTR;
            }

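            /* Emulate the replaced instruction, feeding IEM the original opcode
               bytes that were saved when the patch was installed. */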
            VBOXSTRICTRC rcStrict;
            rcStrict = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pCtxCore, pCtxCore->rip,
                                                          pRec->patch.aPrivInstr, pRec->patch.cbPrivInstr);
            if (RT_SUCCESS(rcStrict))
            {
                if (rcStrict != VINF_SUCCESS)
                    Log(("PATMRCHandleInt3PatchTrap: returns %Rrc\n", VBOXSTRICTRC_TODO(rcStrict)));
                return VBOXSTRICTRC_TODO(rcStrict);
            }

            Log(("IEMExecOneBypassWithPrefetchedByPC failed with %Rrc\n", VBOXSTRICTRC_TODO(rcStrict)));
            PATM_STAT_FAULT_INC(&pRec->patch);
            pRec->patch.cTraps++;
            return VINF_EM_RAW_EMULATE_INSTR;
        }
    }
    return VERR_PATCH_NOT_FOUND;
}