VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/PATMRC.cpp@ 55966

Last change on this file since 55966 was 55966, checked in by vboxsync, 10 years ago

PGM,++: VBOXSTRICTRC for physical access handlers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.9 KB
/* $Id: PATMRC.cpp 55966 2015-05-20 12:42:53Z vboxsync $ */
/** @file
 * PATM - Dynamic Guest OS Patching Manager - Raw-mode Context.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/vmm/patm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#ifdef VBOX_WITH_IEM
# include <VBox/vmm/iem.h>
#endif
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include "PATMInternal.h"
#include "PATMA.h"
#include <VBox/vmm/vm.h>
#include <VBox/dbg.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>


/**
 * \#PF virtual handler callback for guest access to a page monitored by PATM.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 * @param   pvUser      The physical address of the guest page being monitored.
 */
DECLEXPORT(int) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
                                        RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
    Assert(pvUser); Assert(!((uintptr_t)pvUser & PAGE_OFFSET_MASK));
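    /* Record the exact faulting guest address (monitored page base + fault offset) so that the
       ring-3 side handling VINF_PATM_CHECK_PATCH_PAGE can inspect the patch page that was hit. */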
    pVM->patm.s.pvFaultMonitor = (RTRCPTR)((uintptr_t)pvUser + (pvFault & PAGE_OFFSET_MASK));
    return VINF_PATM_CHECK_PATCH_PAGE;
}


/**
 * Checks if the write is located on a page which was patched before.
 * (if so, then we are not allowed to turn on r/w)
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if access interpreted (@a pRegFrame != NULL).
 * @retval  VINF_PGM_HANDLER_DO_DEFAULT (@a pRegFrame == NULL).
 * @retval  VINF_EM_RAW_EMULATE_INSTR on needing to go to ring-3 to do this.
 * @retval  VERR_PATCH_NOT_FOUND if no patch was found.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRegFrame   CPU context if \#PF, NULL if other write.
 * @param   GCPtr       GC pointer to write address.
 * @param   cbWrite     Number of bytes to write.
 *
 */
VMMRC_INT_DECL(VBOXSTRICTRC) PATMRCHandleWriteToPatchPage(PVM pVM, PCPUMCTXCORE pRegFrame, RTRCPTR GCPtr, uint32_t cbWrite)
{
    Assert(cbWrite > 0);

    /* Quick boundary check */
    if (    PAGE_ADDRESS(GCPtr) < PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCLowest)
        ||  PAGE_ADDRESS(GCPtr) > PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCHighest))
        return VERR_PATCH_NOT_FOUND;

    STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWriteDetect, a);

    /*
     * Lookup the patch page record for the write.
     */
    RTRCUINTPTR pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
    RTRCUINTPTR pWritePageEnd   = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
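    /* Note: the write may straddle a page boundary, so both the first and the last page touched are checked below. */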

    PPATMPATCHPAGE pPatchPage;
    pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.CTXSUFF(PatchLookupTree)->PatchTreeByPage, pWritePageStart);
    if (    !pPatchPage
        &&  pWritePageStart != pWritePageEnd)
        pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.CTXSUFF(PatchLookupTree)->PatchTreeByPage, pWritePageEnd);
    if (pPatchPage)
    {
        Log(("PATMGCHandleWriteToPatchPage: Found page %RRv for write to %RRv %d bytes (page low:high %RRv:%RRv\n",
             pPatchPage->Core.Key, GCPtr, cbWrite, pPatchPage->pLowestAddrGC, pPatchPage->pHighestAddrGC));
        if (    (RTRCUINTPTR)pPatchPage->pLowestAddrGC  > (RTRCUINTPTR)GCPtr + cbWrite - 1U
            ||  (RTRCUINTPTR)pPatchPage->pHighestAddrGC < (RTRCUINTPTR)GCPtr)
        {
            /* This part of the page was not patched; try to emulate the instruction / tell the caller to do so. */
            if (!pRegFrame)
            {
                LogFlow(("PATMHandleWriteToPatchPage: Allow writing %RRv LB %#x\n", GCPtr, cbWrite));
                STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
                STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
                return VINF_PGM_HANDLER_DO_DEFAULT;
            }
            LogFlow(("PATMHandleWriteToPatchPage: Interpret %x accessing %RRv\n", pRegFrame->eip, GCPtr));
            int rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(VMMGetCpu0(pVM), pRegFrame, (RTGCPTR)(RTRCUINTPTR)GCPtr));
            if (rc == VINF_SUCCESS)
            {
                STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
                STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
                return VINF_SUCCESS;
            }
            STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpretedFailed);
        }
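        /* Either the write overlaps patched instructions or interpreting it failed; flag the affected
           patches below and have the caller emulate the write (VINF_EM_RAW_EMULATE_INSTR). */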
        R3PTRTYPE(PPATCHINFO) *paPatch = (R3PTRTYPE(PPATCHINFO) *)MMHyperR3ToRC(pVM, pPatchPage->papPatch);

        /* Increase the invalid write counter for each patch that's registered for that page. */
        for (uint32_t i = 0; i < pPatchPage->cCount; i++)
        {
            PPATCHINFO pPatch = (PPATCHINFO)MMHyperR3ToRC(pVM, paPatch[i]);

            pPatch->cInvalidWrites++;
        }

        STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
    return VERR_PATCH_NOT_FOUND;
}


/**
 * Checks if the illegal instruction was caused by a patched instruction.
 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRegFrame   The relevant core context.
 */
VMMRC_INT_DECL(int) PATMRCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pRegFrame)
{
    PPATMPATCHREC pRec;
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    int rc;

    /* Very important check -> otherwise we have a security leak. */
    AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss.Sel & X86_SEL_RPL) <= (EMIsRawRing1Enabled(pVM) ? 2U : 1U),
                 VERR_ACCESS_DENIED);
    Assert(PATMIsPatchGCAddr(pVM, pRegFrame->eip));

    /* OP_ILLUD2 in PATM generated code? */
    if (CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
    {
        LogFlow(("PATMRC: Pending action %x at %x\n", CTXSUFF(pVM->patm.s.pGCState)->uPendingAction, pRegFrame->eip));

        /* Private PATM interface (@todo hack due to lack of anything generic). */
        /* Parameters:
         *      eax = Pending action (currently PATM_ACTION_LOOKUP_ADDRESS)
         *      ecx = PATM_ACTION_MAGIC
         */
        if (    (pRegFrame->eax & CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
            &&  pRegFrame->ecx == PATM_ACTION_MAGIC
           )
        {
            CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;

            switch (pRegFrame->eax)
            {
                case PATM_ACTION_LOOKUP_ADDRESS:
                {
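                    /* The patch code asks us to translate the guest address in edx into an offset into
                       the patch memory block and to add that branch target to the lookup table in edi. */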
                    /* Parameters:
                     *      edx = GC address to find
                     *      edi = PATCHJUMPTABLE ptr
                     */
                    AssertMsg(!pRegFrame->edi || PATMIsPatchGCAddr(pVM, pRegFrame->edi), ("edi = %x\n", pRegFrame->edi));

                    Log(("PATMRC: lookup %x jump table=%x\n", pRegFrame->edx, pRegFrame->edi));

                    pRec = patmQueryFunctionPatch(pVM, (RTRCPTR)pRegFrame->edx);
                    if (pRec)
                    {
                        if (pRec->patch.uState == PATCH_ENABLED)
                        {
                            RTGCUINTPTR pRelAddr = pRec->patch.pPatchBlockOffset;   /* make it relative */
                            rc = patmAddBranchToLookupCache(pVM, (RTRCPTR)pRegFrame->edi, (RTRCPTR)pRegFrame->edx, pRelAddr);
                            if (rc == VINF_SUCCESS)
                            {
                                Log(("Patch block %RRv called as function\n", pRec->patch.pPrivInstrGC));
                                pRec->patch.flags |= PATMFL_CODE_REFERENCED;

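                                /* Resume after the illegal instruction and hand the patch-block relative address back in eax. */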
                                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                                pRegFrame->eax = pRelAddr;
                                STAM_COUNTER_INC(&pVM->patm.s.StatFunctionFound);
                                return VINF_SUCCESS;
                            }
                            AssertFailed();
                        }
                        else
                        {
                            pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pRegFrame->eax = 0;     /* make it fault */
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                            return VINF_SUCCESS;
                        }
                    }
                    else
                    {
                        /* Check first before trying to generate a function/trampoline patch. */
                        if (pVM->patm.s.fOutOfMemory)
                        {
                            pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pRegFrame->eax = 0;     /* make it fault */
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                            return VINF_SUCCESS;
                        }
                        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                        return VINF_PATM_DUPLICATE_FUNCTION;
                    }
                }

                case PATM_ACTION_DISPATCH_PENDING_IRQ:
                    /* Parameters:
                     *      edi = GC address to jump to
                     */
                    Log(("PATMRC: Dispatch pending interrupt; eip=%x->%x\n", pRegFrame->eip, pRegFrame->edi));

                    /* Change EIP to the guest address the patch would normally jump to after setting IF. */
                    pRegFrame->eip = pRegFrame->edi;

                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

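                    /* Restore the guest registers (eax, ecx, edi) that the patch code stashed away before raising this action. */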
                    pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pRegFrame->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;

                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                    STAM_COUNTER_INC(&pVM->patm.s.StatCheckPendingIRQ);

                    /* The caller will call trpmGCExitTrap, which will dispatch pending interrupts for us. */
                    return VINF_SUCCESS;

                case PATM_ACTION_PENDING_IRQ_AFTER_IRET:
                    /* Parameters:
                     *      edi = GC address to jump to
                     */
                    Log(("PATMRC: Dispatch pending interrupt (iret); eip=%x->%x\n", pRegFrame->eip, pRegFrame->edi));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    /* Change EIP to the guest address of the iret. */
                    pRegFrame->eip = pRegFrame->edi;

                    pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pRegFrame->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;
                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                    return VINF_PATM_PENDING_IRQ_AFTER_IRET;

                case PATM_ACTION_DO_V86_IRET:
                {
                    Log(("PATMRC: Do iret to V86 code; eip=%x\n", pRegFrame->eip));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    rc = EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame);
                    if (RT_SUCCESS(rc))
                    {
                        STAM_COUNTER_INC(&pVM->patm.s.StatEmulIret);

                        /* We are no longer executing PATM code; set PIF again. */
                        pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
                        PGMRZDynMapReleaseAutoSet(pVCpu);
                        CPUMGCCallV86Code(pRegFrame);
                        /* does not return */
                    }
                    else
                        STAM_COUNTER_INC(&pVM->patm.s.StatEmulIretFailed);
                    return rc;
                }

#ifdef DEBUG
                case PATM_ACTION_LOG_CLI:
                    Log(("PATMRC: CLI at %x (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_STI:
                    Log(("PATMRC: STI at %x (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_POPF_IF1:
                    Log(("PATMRC: POPF setting IF at %x (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_POPF_IF0:
                    Log(("PATMRC: POPF at %x (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_PUSHF:
                    Log(("PATMRC: PUSHF at %x (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_IF1:
                    Log(("PATMRC: IF=1 escape from %x\n", pRegFrame->eip));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_IRET:
                {
                    char *pIretFrame = (char *)pRegFrame->edx;
                    uint32_t eip, selCS, uEFlags;

                    rc = MMGCRamRead(pVM, &eip, pIretFrame, 4);
                    rc |= MMGCRamRead(pVM, &selCS, pIretFrame + 4, 4);
                    rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                    if (rc == VINF_SUCCESS)
                    {
                        if (    (uEFlags & X86_EFL_VM)
                            ||  (selCS & X86_SEL_RPL) == 3)
                        {
                            uint32_t selSS, esp;

                            rc |= MMGCRamRead(pVM, &esp, pIretFrame + 12, 4);
                            rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                            if (uEFlags & X86_EFL_VM)
                            {
                                uint32_t selDS, selES, selFS, selGS;
                                rc = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                                rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                                rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                                rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                                if (rc == VINF_SUCCESS)
                                {
                                    Log(("PATMRC: IRET->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                                    Log(("PATMRC: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                                }
                            }
                            else
                                Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                        }
                        else
                            Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
                    }
                    Log(("PATMRC: IRET from %x (IF->1) current eflags=%x\n", pRegFrame->eip, pVM->patm.s.CTXSUFF(pGCState)->uVMFlags));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
                }

                case PATM_ACTION_LOG_GATE_ENTRY:
                {
                    char *pIretFrame = (char *)pRegFrame->edx;
                    uint32_t eip, selCS, uEFlags;

                    rc = MMGCRamRead(pVM, &eip, pIretFrame, 4);
                    rc |= MMGCRamRead(pVM, &selCS, pIretFrame + 4, 4);
                    rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                    if (rc == VINF_SUCCESS)
                    {
                        if (    (uEFlags & X86_EFL_VM)
                            ||  (selCS & X86_SEL_RPL) == 3)
                        {
                            uint32_t selSS, esp;

                            rc |= MMGCRamRead(pVM, &esp, pIretFrame + 12, 4);
                            rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                            if (uEFlags & X86_EFL_VM)
                            {
                                uint32_t selDS, selES, selFS, selGS;
                                rc = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                                rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                                rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                                rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                                if (rc == VINF_SUCCESS)
                                {
                                    Log(("PATMRC: GATE->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                                    Log(("PATMRC: GATE->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                                }
                            }
                            else
                                Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                        }
                        else
                            Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
                    }
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
                }

                case PATM_ACTION_LOG_RET:
                    Log(("PATMRC: RET from %x to %x ESP=%x iopl=%d\n", pRegFrame->eip, pRegFrame->edx, pRegFrame->ebx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_CALL:
                    Log(("PATMRC: CALL to %RRv return addr %RRv ESP=%x iopl=%d\n", pVM->patm.s.CTXSUFF(pGCState)->GCCallPatchTargetAddr, pVM->patm.s.CTXSUFF(pGCState)->GCCallReturnAddr, pRegFrame->edx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
#endif
                default:
                    AssertFailed();
                    break;
            }
        }
        else
            AssertFailed();
        CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;
    }
    AssertMsgFailed(("Unexpected OP_ILLUD2 in patch code at %x (pending action %x)!!!!\n", pRegFrame->eip, CTXSUFF(pVM->patm.s.pGCState)->uPendingAction));
    return VINF_EM_RAW_EMULATE_INSTR;
}

/**
 * Checks if the int 3 was caused by a patched instruction.
 *
 * @returns Strict VBox status code, including all statuses that
 *          EMInterpretInstructionDisasState (or IEMExecOneBypassWithPrefetchedByPC) may return, and:
 * @retval  VINF_SUCCESS
 * @retval  VINF_PATM_PATCH_INT3
 * @retval  VINF_EM_RAW_EMULATE_INSTR
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRegFrame   The relevant core context.
 */
VMMRC_INT_DECL(int) PATMRCHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pRegFrame)
{
    PPATMPATCHREC pRec;
    int rc;

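    /* Very important check -> otherwise we have a security leak: only non-V86 ring-1 guest code
       (or ring-2 when raw ring-1 is enabled) may end up here. */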
    AssertReturn(!pRegFrame->eflags.Bits.u1VM
                 && (   (pRegFrame->ss.Sel & X86_SEL_RPL) == 1
                     || (EMIsRawRing1Enabled(pVM) && (pRegFrame->ss.Sel & X86_SEL_RPL) == 2)), VERR_ACCESS_DENIED);

    /* Int 3 in PATM generated code? (most common case) */
    if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
    {
        /* Note! Hardcoded assumption about it being a single byte int 3 instruction. */
        pRegFrame->eip--;
        return VINF_PATM_PATCH_INT3;
    }

    /** @todo could use simple caching here to speed things up. */
    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)(pRegFrame->eip - 1)); /* eip is pointing to the instruction *after* 'int 3' already */
    if (pRec && pRec->patch.uState == PATCH_ENABLED)
    {
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT_BLOCK)
        {
            Assert(pRec->patch.opcode == OP_CLI);
            /* This is a special cli block that was turned into an int 3 patch. We jump to the generated code manually. */
            pRegFrame->eip = (uint32_t)PATCHCODE_PTR_GC(&pRec->patch);
            STAM_COUNTER_INC(&pVM->patm.s.StatInt3BlockRun);
            return VINF_SUCCESS;
        }
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT)
        {
            /* eip is pointing to the instruction *after* 'int 3' already */
            pRegFrame->eip = pRegFrame->eip - 1;

            PATM_STAT_RUN_INC(&pRec->patch);

            Log(("PATMHandleInt3PatchTrap found int3 for %s at %x\n", patmGetInstructionString(pRec->patch.opcode, 0), pRegFrame->eip));

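            /* Only the opcodes below are safe to finish right here; everything else is bounced back for full instruction emulation. */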
            switch(pRec->patch.opcode)
            {
                case OP_CPUID:
                case OP_IRET:
#ifdef VBOX_WITH_RAW_RING1
                case OP_SMSW:
                case OP_MOV:        /* mov xx, CS */
#endif
                    break;

                case OP_STR:
                case OP_SGDT:
                case OP_SLDT:
                case OP_SIDT:
                case OP_LSL:
                case OP_LAR:
#ifndef VBOX_WITH_RAW_RING1
                case OP_SMSW:
#endif
                case OP_VERW:
                case OP_VERR:
                default:
                    PATM_STAT_FAULT_INC(&pRec->patch);
                    pRec->patch.cTraps++;
                    return VINF_EM_RAW_EMULATE_INSTR;
            }

            PVMCPU pVCpu = VMMGetCpu0(pVM);
            DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
            if (enmCpuMode != DISCPUMODE_32BIT)
            {
                AssertFailed();
                return VINF_EM_RAW_EMULATE_INSTR;
            }

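            /* Emulate the original privileged instruction using the instruction bytes saved in the
               patch record; the guest copy of the instruction currently holds the int 3. */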
#ifdef VBOX_WITH_IEM
            VBOXSTRICTRC rcStrict;
            rcStrict = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip,
                                                          pRec->patch.aPrivInstr, pRec->patch.cbPrivInstr);
            rc = VBOXSTRICTRC_TODO(rcStrict);
#else
            uint32_t cbOp;
            DISCPUSTATE cpu;
            rc = DISInstr(&pRec->patch.aPrivInstr[0], enmCpuMode, &cpu, &cbOp);
            if (RT_FAILURE(rc))
            {
                Log(("DISCoreOne failed with %Rrc\n", rc));
                PATM_STAT_FAULT_INC(&pRec->patch);
                pRec->patch.cTraps++;
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            rc = VBOXSTRICTRC_TODO(EMInterpretInstructionDisasState(pVCpu, &cpu, pRegFrame, 0 /* not relevant here */,
                                                                    EMCODETYPE_SUPERVISOR));
#endif
            if (RT_FAILURE(rc))
            {
                Log(("EMInterpretInstructionCPU failed with %Rrc\n", rc));
                PATM_STAT_FAULT_INC(&pRec->patch);
                pRec->patch.cTraps++;
                return VINF_EM_RAW_EMULATE_INSTR;
            }
            return rc;
        }
    }
    return VERR_PATCH_NOT_FOUND;
}
