VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/PATMRC.cpp@ 45276

Last change on this file since 45276 was 45276, checked in by vboxsync, 12 years ago

Ring-1 compression patches, courtesy of trivirt AG:

  • main: diff to remove the hwvirt requirement for QNX
  • rem: diff for dealing with raw ring 0/1 selectors and general changes to allowed guest execution states
  • vmm: changes for using the guest's TSS selector index as our hypervisor TSS selector (makes str safe) (VBOX_WITH_SAFE_STR )
  • vmm: changes for dealing with guest ring 1 code (VBOX_WITH_RAW_RING1)
  • vmm: change to emulate smsw in RC/R0 (QNX uses this old style instruction a lot so going to qemu for emulation is very expensive)
  • vmm: change (hack) to kick out patm virtual handlers in case they conflict with guest GDT/TSS write monitors; we should allow multiple handlers per page, but that change would be rather invasive
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.1 KB
Line 
1/* $Id: PATMRC.cpp 45276 2013-04-02 08:17:11Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager - Raw-mode Context.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PATM
23#include <VBox/vmm/patm.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#ifdef VBOX_WITH_IEM
30# include <VBox/vmm/iem.h>
31#endif
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/mm.h>
34#include "PATMInternal.h"
35#include "PATMA.h"
36#include <VBox/vmm/vm.h>
37#include <VBox/dbg.h>
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/err.h>
41#include <VBox/log.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45
46
47/**
48 * \#PF Virtual Handler callback for Guest access a page monitored by PATM
49 *
50 * @returns VBox status code (appropriate for trap handling and GC return).
51 * @param pVM Pointer to the VM.
52 * @param uErrorCode CPU Error code.
53 * @param pRegFrame Trap register frame.
54 * @param pvFault The fault address (cr2).
55 * @param pvRange The base address of the handled virtual range.
56 * @param offRange The offset of the access into this range.
57 * (If it's a EIP range this is the EIP, if not it's pvFault.)
58 */
59VMMRCDECL(int) PATMGCMonitorPage(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
60{
61 NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
62 pVM->patm.s.pvFaultMonitor = (RTRCPTR)(RTRCUINTPTR)pvFault;
63 return VINF_PATM_CHECK_PATCH_PAGE;
64}
65
66
67/**
68 * Checks if the write is located on a page with was patched before.
69 * (if so, then we are not allowed to turn on r/w)
70 *
71 * @returns VBox status
72 * @param pVM Pointer to the VM.
73 * @param pRegFrame CPU context
74 * @param GCPtr GC pointer to write address
75 * @param cbWrite Nr of bytes to write
76 *
77 */
78VMMRC_INT_DECL(int) PATMRCHandleWriteToPatchPage(PVM pVM, PCPUMCTXCORE pRegFrame, RTRCPTR GCPtr, uint32_t cbWrite)
79{
80 RTGCUINTPTR pWritePageStart, pWritePageEnd;
81 PPATMPATCHPAGE pPatchPage;
82
83 /* Quick boundary check */
84 if ( PAGE_ADDRESS(GCPtr) < PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCLowest)
85 || PAGE_ADDRESS(GCPtr) > PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCHighest)
86 )
87 return VERR_PATCH_NOT_FOUND;
88
89 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWriteDetect, a);
90
91 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
92 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
93
94 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(CTXSUFF(&pVM->patm.s.PatchLookupTree)->PatchTreeByPage, (AVLOU32KEY)pWritePageStart);
95 if ( !pPatchPage
96 && pWritePageStart != pWritePageEnd
97 )
98 {
99 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(CTXSUFF(&pVM->patm.s.PatchLookupTree)->PatchTreeByPage, (AVLOU32KEY)pWritePageEnd);
100 }
101
102#ifdef LOG_ENABLED
103 if (pPatchPage)
104 Log(("PATMGCHandleWriteToPatchPage: Found page %RRv for write to %RRv %d bytes (page low:high %RRv:%RRv\n", pPatchPage->Core.Key, GCPtr, cbWrite, pPatchPage->pLowestAddrGC, pPatchPage->pHighestAddrGC));
105#endif
106
107 if (pPatchPage)
108 {
109 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
110 || pPatchPage->pHighestAddrGC < (RTRCPTR)GCPtr)
111 {
112 /* This part of the page was not patched; try to emulate the instruction. */
113 LogFlow(("PATMHandleWriteToPatchPage: Interpret %x accessing %RRv\n", pRegFrame->eip, GCPtr));
114 int rc = EMInterpretInstruction(VMMGetCpu0(pVM), pRegFrame, (RTGCPTR)(RTRCUINTPTR)GCPtr);
115 if (rc == VINF_SUCCESS)
116 {
117 STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
118 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
119 return VINF_SUCCESS;
120 }
121 STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpretedFailed);
122 }
123 R3PTRTYPE(PPATCHINFO) *paPatch = (R3PTRTYPE(PPATCHINFO) *)MMHyperR3ToRC(pVM, pPatchPage->papPatch);
124
125 /* Increase the invalid write counter for each patch that's registered for that page. */
126 for (uint32_t i=0;i<pPatchPage->cCount;i++)
127 {
128 PPATCHINFO pPatch = (PPATCHINFO)MMHyperR3ToRC(pVM, paPatch[i]);
129
130 pPatch->cInvalidWrites++;
131 }
132
133 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
134 return VINF_EM_RAW_EMULATE_INSTR;
135 }
136
137 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
138 return VERR_PATCH_NOT_FOUND;
139}
140
141
/**
 * Checks if the illegal instruction trap (\#UD) was caused by a patched
 * instruction, i.e. an OP_ILLUD2 that PATM emitted into generated code as a
 * private hypercall mechanism (eax = pending action, ecx = PATM_ACTION_MAGIC).
 *
 * @returns VBox status; VINF_EM_RAW_EMULATE_INSTR when the trap was not one
 *          of the recognized pending actions.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRegFrame   The relevant core register frame.
 */
VMMRC_INT_DECL(int) PATMRCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pRegFrame)
{
    PPATMPATCHREC pRec;
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    int rc;

    /* Very important check -> otherwise we have a security leak. */
#ifdef VBOX_WITH_RAW_RING1
    AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss.Sel & X86_SEL_RPL) <= (unsigned) (EMIsRawRing1Enabled(pVM) ? 2 : 1), VERR_ACCESS_DENIED);
#else
    AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss.Sel & X86_SEL_RPL) == 1, VERR_ACCESS_DENIED);
#endif
    Assert(PATMIsPatchGCAddr(pVM, pRegFrame->eip));

    /* OP_ILLUD2 in PATM generated code? */
    if (CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
    {
        LogFlow(("PATMRC: Pending action %x at %x\n", CTXSUFF(pVM->patm.s.pGCState)->uPendingAction, pRegFrame->eip));

        /* Private PATM interface (@todo hack due to lack of anything generic). */
        /* Parameters:
         *      eax = Pending action (currently PATM_ACTION_LOOKUP_ADDRESS)
         *      ecx = PATM_ACTION_MAGIC
         */
        if (    (pRegFrame->eax & CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
            &&   pRegFrame->ecx == PATM_ACTION_MAGIC
           )
        {
            /* The action is consumed here; it is re-cleared below for the failure paths. */
            CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;

            switch (pRegFrame->eax)
            {
                case PATM_ACTION_LOOKUP_ADDRESS:
                {
                    /* Parameters:
                     *      edx = GC address to find
                     *      edi = PATCHJUMPTABLE ptr
                     */
                    AssertMsg(!pRegFrame->edi || PATMIsPatchGCAddr(pVM, pRegFrame->edi), ("edi = %x\n", pRegFrame->edi));

                    Log(("PATMRC: lookup %x jump table=%x\n", pRegFrame->edx, pRegFrame->edi));

                    pRec = patmQueryFunctionPatch(pVM, (RTRCPTR)pRegFrame->edx);
                    if (pRec)
                    {
                        if (pRec->patch.uState == PATCH_ENABLED)
                        {
                            RTGCUINTPTR pRelAddr = pRec->patch.pPatchBlockOffset;   /* make it relative */
                            rc = patmAddBranchToLookupCache(pVM, (RTRCPTR)pRegFrame->edi, (RTRCPTR)pRegFrame->edx, pRelAddr);
                            if (rc == VINF_SUCCESS)
                            {
                                Log(("Patch block %RRv called as function\n", pRec->patch.pPrivInstrGC));
                                pRec->patch.flags |= PATMFL_CODE_REFERENCED;

                                /* Skip the OP_ILLUD2 and return the relative patch address in eax. */
                                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                                pRegFrame->eax = pRelAddr;
                                STAM_COUNTER_INC(&pVM->patm.s.StatFunctionFound);
                                return VINF_SUCCESS;
                            }
                            AssertFailed();
                            /* NOTE(review): there is no break/return here, so this
                             * failure path falls through into the
                             * PATM_ACTION_DISPATCH_PENDING_IRQ case below --
                             * looks unintentional; confirm. */
                        }
                        else
                        {
                            pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pRegFrame->eax = 0;     /* make it fault */
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                            return VINF_SUCCESS;
                        }
                    }
                    else
                    {
                        /* Check first before trying to generate a function/trampoline patch. */
                        if (pVM->patm.s.fOutOfMemory)
                        {
                            pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pRegFrame->eax = 0;     /* make it fault */
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                            return VINF_SUCCESS;
                        }
                        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                        return VINF_PATM_DUPLICATE_FUNCTION;
                    }
                }

                case PATM_ACTION_DISPATCH_PENDING_IRQ:
                    /* Parameters:
                     *      edi = GC address to jump to
                     */
                    Log(("PATMRC: Dispatch pending interrupt; eip=%x->%x\n", pRegFrame->eip, pRegFrame->edi));

                    /* Change EIP to the guest address the patch would normally jump to after setting IF. */
                    pRegFrame->eip = pRegFrame->edi;

                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    /* Restore the guest registers the patch code used for this hypercall. */
                    pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pRegFrame->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;

                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                    STAM_COUNTER_INC(&pVM->patm.s.StatCheckPendingIRQ);

                    /* The caller will call trpmGCExitTrap, which will dispatch pending interrupts for us. */
                    return VINF_SUCCESS;

                case PATM_ACTION_PENDING_IRQ_AFTER_IRET:
                    /* Parameters:
                     *      edi = GC address to jump to
                     */
                    Log(("PATMRC: Dispatch pending interrupt (iret); eip=%x->%x\n", pRegFrame->eip, pRegFrame->edi));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    /* Change EIP to the guest address of the iret. */
                    pRegFrame->eip = pRegFrame->edi;

                    /* Restore the guest registers the patch code used for this hypercall. */
                    pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pRegFrame->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;
                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                    return VINF_PATM_PENDING_IRQ_AFTER_IRET;

                case PATM_ACTION_DO_V86_IRET:
                {
                    Log(("PATMRC: Do iret to V86 code; eip=%x\n", pRegFrame->eip));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    rc = EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame);
                    if (RT_SUCCESS(rc))
                    {
                        STAM_COUNTER_INC(&pVM->patm.s.StatEmulIret);

                        /* We are no longer executing PATM code; set PIF again. */
                        pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
                        PGMRZDynMapReleaseAutoSet(pVCpu);
                        CPUMGCCallV86Code(pRegFrame);
                        /* does not return */
                    }
                    else
                        STAM_COUNTER_INC(&pVM->patm.s.StatEmulIretFailed);
                    return rc;
                }

#ifdef DEBUG
                /* The PATM_ACTION_LOG_* cases below exist purely for debug logging;
                 * each logs its event and then skips the OP_ILLUD2 instruction. */
                case PATM_ACTION_LOG_CLI:
                    Log(("PATMRC: CLI at %x (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_STI:
                    Log(("PATMRC: STI at %x (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_POPF_IF1:
                    Log(("PATMRC: POPF setting IF at %x (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_POPF_IF0:
                    Log(("PATMRC: POPF at %x (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_PUSHF:
                    Log(("PATMRC: PUSHF at %x (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_IF1:
                    Log(("PATMRC: IF=1 escape from %x\n", pRegFrame->eip));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_IRET:
                {
                    /* edx points to the iret frame on the guest stack; read it via
                       MMGCRamRead (may fault, hence the rc accumulation). */
                    char *pIretFrame = (char *)pRegFrame->edx;
                    uint32_t eip, selCS, uEFlags;

                    rc = MMGCRamRead(pVM, &eip, pIretFrame, 4);
                    rc |= MMGCRamRead(pVM, &selCS, pIretFrame + 4, 4);
                    rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                    if (rc == VINF_SUCCESS)
                    {
                        /* A return to V86 mode or ring 3 also pushes ss:esp (and in
                           V86 mode the data segment registers). */
                        if (    (uEFlags & X86_EFL_VM)
                            ||  (selCS & X86_SEL_RPL) == 3)
                        {
                            uint32_t selSS, esp;

                            rc |= MMGCRamRead(pVM, &esp, pIretFrame + 12, 4);
                            rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                            if (uEFlags & X86_EFL_VM)
                            {
                                uint32_t selDS, selES, selFS, selGS;
                                rc = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                                rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                                rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                                rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                                if (rc == VINF_SUCCESS)
                                {
                                    Log(("PATMRC: IRET->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                                    Log(("PATMRC: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                                }
                            }
                            else
                                Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                        }
                        else
                            Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
                    }
                    Log(("PATMRC: IRET from %x (IF->1) current eflags=%x\n", pRegFrame->eip, pVM->patm.s.CTXSUFF(pGCState)->uVMFlags));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
                }

                case PATM_ACTION_LOG_GATE_ENTRY:
                {
                    /* Same frame layout as the IRET case above, just logged as a gate entry. */
                    char *pIretFrame = (char *)pRegFrame->edx;
                    uint32_t eip, selCS, uEFlags;

                    rc = MMGCRamRead(pVM, &eip, pIretFrame, 4);
                    rc |= MMGCRamRead(pVM, &selCS, pIretFrame + 4, 4);
                    rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                    if (rc == VINF_SUCCESS)
                    {
                        if (    (uEFlags & X86_EFL_VM)
                            ||  (selCS & X86_SEL_RPL) == 3)
                        {
                            uint32_t selSS, esp;

                            rc |= MMGCRamRead(pVM, &esp, pIretFrame + 12, 4);
                            rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                            if (uEFlags & X86_EFL_VM)
                            {
                                uint32_t selDS, selES, selFS, selGS;
                                rc = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                                rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                                rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                                rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                                if (rc == VINF_SUCCESS)
                                {
                                    Log(("PATMRC: GATE->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                                    Log(("PATMRC: GATE->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                                }
                            }
                            else
                                Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                        }
                        else
                            Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
                    }
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
                }

                case PATM_ACTION_LOG_RET:
                    Log(("PATMRC: RET from %x to %x ESP=%x iopl=%d\n", pRegFrame->eip, pRegFrame->edx, pRegFrame->ebx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_CALL:
                    Log(("PATMRC: CALL to %RRv return addr %RRv ESP=%x iopl=%d\n", pVM->patm.s.CTXSUFF(pGCState)->GCCallPatchTargetAddr, pVM->patm.s.CTXSUFF(pGCState)->GCCallReturnAddr, pRegFrame->edx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
#endif
                default:
                    AssertFailed();
                    break;
            }
        }
        else
            AssertFailed();
        /* Unrecognized/failed action: clear it and fall back to full emulation below. */
        CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;
    }
    AssertMsgFailed(("Unexpected OP_ILLUD2 in patch code at %x (pending action %x)!!!!\n", pRegFrame->eip, CTXSUFF(pVM->patm.s.pGCState)->uPendingAction));
    return VINF_EM_RAW_EMULATE_INSTR;
}
443
/**
 * Checks if the int 3 was caused by a patched instruction.
 *
 * @returns Strict VBox status, including the statuses returned by the
 *          instruction interpreter/executor used for the emulation below.
 * @retval  VINF_SUCCESS
 * @retval  VINF_PATM_PATCH_INT3
 * @retval  VINF_EM_RAW_EMULATE_INSTR
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRegFrame   The relevant core register frame.
 */
VMMRC_INT_DECL(int) PATMRCHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pRegFrame)
{
    PPATMPATCHREC pRec;
    int rc;

    /* Security check: reject traps from V86 mode and from rings other than 1
       (or 2 when raw ring-1 support is enabled). */
#ifdef VBOX_WITH_RAW_RING1
    AssertReturn(!pRegFrame->eflags.Bits.u1VM && ((pRegFrame->ss.Sel & X86_SEL_RPL) == 1 || (EMIsRawRing1Enabled(pVM) && (pRegFrame->ss.Sel & X86_SEL_RPL) == 2)), VERR_ACCESS_DENIED);
#else
    AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss.Sel & X86_SEL_RPL) == 1, VERR_ACCESS_DENIED);
#endif

    /* Int 3 in PATM generated code? (most common case) */
    if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
    {
        /* Note! Hardcoded assumption about it being a single byte int 3 instruction. */
        pRegFrame->eip--;
        return VINF_PATM_PATCH_INT3;
    }

    /** @todo could use simple caching here to speed things up. */
    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)(pRegFrame->eip - 1));    /* eip is pointing to the instruction *after* 'int 3' already */
    if (pRec && pRec->patch.uState == PATCH_ENABLED)
    {
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT_BLOCK)
        {
            Assert(pRec->patch.opcode == OP_CLI);
            /* This is a special cli block that was turned into an int 3 patch. We jump to the generated code manually. */
            pRegFrame->eip = (uint32_t)PATCHCODE_PTR_GC(&pRec->patch);
            STAM_COUNTER_INC(&pVM->patm.s.StatInt3BlockRun);
            return VINF_SUCCESS;
        }
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT)
        {
            /* eip is pointing to the instruction *after* 'int 3' already */
            pRegFrame->eip = pRegFrame->eip - 1;

            PATM_STAT_RUN_INC(&pRec->patch);

            Log(("PATMHandleInt3PatchTrap found int3 for %s at %x\n", patmGetInstructionString(pRec->patch.opcode, 0), pRegFrame->eip));

            /* Only the opcodes in the first group are emulated here; everything
               else is bounced back for full raw-mode instruction emulation. */
            switch(pRec->patch.opcode)
            {
                case OP_CPUID:
                case OP_IRET:
#ifdef VBOX_WITH_RAW_RING1
                case OP_SMSW:
                case OP_MOV:        /* mov xx, CS */
#endif
                    break;          /* emulate below */

                case OP_STR:
                case OP_SGDT:
                case OP_SLDT:
                case OP_SIDT:
                case OP_LSL:
                case OP_LAR:
#ifndef VBOX_WITH_RAW_RING1
                case OP_SMSW:
#endif
                case OP_VERW:
                case OP_VERR:
                default:
                    PATM_STAT_FAULT_INC(&pRec->patch);
                    pRec->patch.cTraps++;
                    return VINF_EM_RAW_EMULATE_INSTR;
            }

            PVMCPU pVCpu = VMMGetCpu0(pVM);
            DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
            if (enmCpuMode != DISCPUMODE_32BIT)
            {
                AssertFailed();
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef VBOX_WITH_IEM
            /* Execute the original (pre-patch) instruction bytes via IEM. */
            VBOXSTRICTRC rcStrict;
            rcStrict = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip,
                                                          pRec->patch.aPrivInstr, pRec->patch.cbPrivInstr);
            rc = VBOXSTRICTRC_TODO(rcStrict);
#else
            /* Disassemble and interpret the original (pre-patch) instruction bytes. */
            uint32_t cbOp;
            DISCPUSTATE cpu;
            rc = DISInstr(&pRec->patch.aPrivInstr[0], enmCpuMode, &cpu, &cbOp);
            if (RT_FAILURE(rc))
            {
                Log(("DISCoreOne failed with %Rrc\n", rc));
                PATM_STAT_FAULT_INC(&pRec->patch);
                pRec->patch.cTraps++;
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            rc = EMInterpretInstructionDisasState(pVCpu, &cpu, pRegFrame, 0 /* not relevant here */,
                                                  EMCODETYPE_SUPERVISOR);
#endif
            if (RT_FAILURE(rc))
            {
                Log(("EMInterpretInstructionCPU failed with %Rrc\n", rc));
                PATM_STAT_FAULT_INC(&pRec->patch);
                pRec->patch.cTraps++;
                return VINF_EM_RAW_EMULATE_INSTR;
            }
            return rc;
        }
    }
    return VERR_PATCH_NOT_FOUND;
}
563
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette