VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATMPatch.cpp@400

Last change on this file since 400 was 302, checked in by vboxsync, 18 years ago

Handle iret to ring 0 code with IF cleared. Warning: possibly dangerous change.

/* $Id: PATMPatch.cpp 302 2007-01-25 14:48:55Z vboxsync $ */
/** @file
 * PATMPatch - Dynamic Guest OS Instruction patches
 *
 * NOTE: CSAM assumes patch memory is never reused!!
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/patm.h>
#include <VBox/stam.h>
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/ssm.h>
#include <VBox/pdm.h>
#include <VBox/trpm.h>
#include <VBox/param.h>
#include <iprt/avl.h>
#include "PATMInternal.h"
#include <VBox/vm.h>
#include <VBox/csam.h>

#include <VBox/dbg.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>

#include <stdlib.h>
#include <stdio.h>
#include "PATMA.h"
#include "PATMPatch.h"

/* Internal structure for passing more information about call fixups to patmPatchGenCode. */
typedef struct
{
    RTGCPTR pTargetGC;      /* Call/jump target (guest context). */
    RTGCPTR pCurInstrGC;    /* Guest address of the instruction being patched. */
    RTGCPTR pNextInstrGC;   /* Guest address of the following instruction. */
    RTGCPTR pReturnGC;      /* Return address for generated calls. */
} PATMCALLINFO, *PPATMCALLINFO;

int patmPatchAddReloc32(PVM pVM, PPATCHINFO pPatch, uint8_t *pRelocHC, uint32_t uType, RTGCPTR pSource, RTGCPTR pDest)
{
    PRELOCREC pRec;

    Assert(uType == FIXUP_ABSOLUTE || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));

    LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%VGv source=%VGv dest=%VGv\n", uType, pRelocHC - pVM->patm.s.pPatchMemHC + pVM->patm.s.pPatchMemGC, pSource, pDest));

    pRec = (PRELOCREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
    Assert(pRec);
    pRec->Core.Key  = (AVLPVKEY)pRelocHC;
    pRec->pRelocPos = pRelocHC; /* @todo redundant. */
    pRec->pSource   = pSource;
    pRec->pDest     = pDest;
    pRec->uType     = uType;

    bool ret = RTAvlPVInsert(&pPatch->FixupTree, &pRec->Core);
    Assert(ret); NOREF(ret);
    pPatch->nrFixups++;

    return VINF_SUCCESS;
}

int patmPatchAddJump(PVM pVM, PPATCHINFO pPatch, uint8_t *pJumpHC, uint32_t offset, RTGCPTR pTargetGC, uint32_t opcode)
{
    PJUMPREC pRec;

    pRec = (PJUMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
    Assert(pRec);

    pRec->Core.Key  = (AVLPVKEY)pJumpHC;
    pRec->pJumpHC   = pJumpHC; /* @todo redundant. */
    pRec->offDispl  = offset;
    pRec->pTargetGC = pTargetGC;
    pRec->opcode    = opcode;

    bool ret = RTAvlPVInsert(&pPatch->JumpTree, &pRec->Core);
    Assert(ret); NOREF(ret);
    pPatch->nrJumpRecs++;

    return VINF_SUCCESS;
}

#define PATCHGEN_PROLOG_NODEF(pVM, pPatch) \
    pPB = PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset; \
    \
    if (pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) \
    { \
        pVM->patm.s.fOutOfMemory = true; \
        Assert(pPB + 256 >= pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem); \
        return VERR_NO_MEMORY; \
    }

#define PATCHGEN_PROLOG(pVM, pPatch) \
    uint8_t *pPB; \
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);


#define PATCHGEN_EPILOG(pPatch, size) \
    Assert(size <= 512); \
    pPatch->uCurPatchOffset += size;

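/*
 * A minimal usage sketch for the prolog/epilog macros above (illustrative
 * only; patmPatchGenNop is a hypothetical example, not part of this file):
 *
 *     int patmPatchGenNop(PVM pVM, PPATCHINFO pPatch)
 *     {
 *         PATCHGEN_PROLOG(pVM, pPatch);   // declares pPB and returns
 *                                         // VERR_NO_MEMORY when patch memory runs low
 *         pPB[0] = 0x90;                  // emit bytes at the current patch offset
 *         PATCHGEN_EPILOG(pPatch, 1);     // advance uCurPatchOffset by the bytes written
 *         return VINF_SUCCESS;
 *     }
 */

/**
 * Copy a pre-assembled code template into the patch buffer and resolve the
 * magic markers (PATM_*) embedded in it.
 *
 * @returns Size of the generated code block; when the template ends with a
 *          jump back to guest code and fGenJump is false, that jump is not
 *          counted.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pPB           Host pointer to the current patch buffer position
 * @param   pAsmRecord    Assembly template record (see PATMA.h)
 * @param   pReturnAddrGC Guest address to jump back to (fGenJump only)
 * @param   fGenJump      Generate the jump back to guest code
 * @param   pCallInfo     Optional call information for call/jump fixups
 */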
static uint32_t patmPatchGenCode(PVM pVM, PPATCHINFO pPatch, uint8_t *pPB, PPATCHASMRECORD pAsmRecord, GCPTRTYPE(uint8_t *)pReturnAddrGC, bool fGenJump,
                                 PPATMCALLINFO pCallInfo = 0)
{
    uint32_t i, j;

    Assert(fGenJump == false || pReturnAddrGC);
    Assert(fGenJump == false || pAsmRecord->offJump);
    Assert(pAsmRecord && pAsmRecord->size > sizeof(pAsmRecord->uReloc[0]));

    // Copy the code block
    memcpy(pPB, pAsmRecord->pFunction, pAsmRecord->size);

    // Process all fixups
    for (j = 0, i = 0; i < pAsmRecord->nrRelocs * 2; i += 2)
    {
        for (; j < pAsmRecord->size; j++)
        {
            if (*(uint32_t*)&pPB[j] == pAsmRecord->uReloc[i])
            {
                GCPTRTYPE(uint32_t *)dest;

#ifdef VBOX_STRICT
                if (pAsmRecord->uReloc[i] == PATM_FIXUP)
                    Assert(pAsmRecord->uReloc[i+1] != 0);
                else
                    Assert(pAsmRecord->uReloc[i+1] == 0);
#endif

                switch (pAsmRecord->uReloc[i])
                {
                case PATM_VMFLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
                    break;

                case PATM_PENDINGACTION:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
                    break;

                case PATM_FIXUP:
                    /* Offset in uReloc[i+1] is from the base of the function. */
                    dest = (RTGCUINTPTR)pVM->patm.s.pPatchMemGC + pAsmRecord->uReloc[i+1] + (RTGCUINTPTR)(pPB - pVM->patm.s.pPatchMemHC);
                    break;
#ifdef VBOX_WITH_STATISTICS
                case PATM_ALLPATCHCALLS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
                    break;

                case PATM_IRETEFLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
                    break;

                case PATM_IRETCS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
                    break;

                case PATM_IRETEIP:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
                    break;

                case PATM_PERPATCHCALLS:
                    dest = patmPatchQueryStatAddress(pVM, pPatch);
                    break;
#endif
                case PATM_STACKPTR:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
                    break;

                /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
                 * part to store the original return addresses.
                 */
                case PATM_STACKBASE:
                    dest = pVM->patm.s.pGCStackGC;
                    break;

                case PATM_STACKBASE_GUEST:
                    dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
                    break;

                case PATM_RETURNADDR:   /* absolute guest address; no fixup required */
                    Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);
                    dest = pCallInfo->pReturnGC;
                    break;

                case PATM_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
                    Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);

                    /** @note hardcoded assumption that we must return to the instruction following this block */
                    dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->size;
                    break;

                case PATM_CALLTARGET:   /* relative to patch address; no fixup required */
                    Assert(pCallInfo && pAsmRecord->uReloc[i] >= PATM_NO_FIXUP);

                    /* Address must be filled in later. (see patmr3SetBranchTargets) */
                    patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
                    dest = PATM_ILLEGAL_DESTINATION;
                    break;

                case PATM_PATCHBASE:    /* Patch GC base address */
                    dest = pVM->patm.s.pPatchMemGC;
                    break;

                case PATM_CPUID_STD_PTR:
                    dest = CPUMGetGuestCpuIdStdGCPtr(pVM);
                    break;

                case PATM_CPUID_EXT_PTR:
                    dest = CPUMGetGuestCpuIdExtGCPtr(pVM);
                    break;

                case PATM_CPUID_DEF_PTR:
                    dest = CPUMGetGuestCpuIdDefGCPtr(pVM);
                    break;

                case PATM_CPUID_STD_MAX:
                    dest = CPUMGetGuestCpuIdStdMax(pVM);
                    break;

                case PATM_CPUID_EXT_MAX:
                    dest = CPUMGetGuestCpuIdExtMax(pVM);
                    break;

                case PATM_INTERRUPTFLAG:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
                    break;

                case PATM_INHIBITIRQADDR:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
                    break;

                case PATM_NEXTINSTRADDR:
                    Assert(pCallInfo);
                    /* pNextInstrGC can be 0 if several instructions that inhibit irqs follow each other */
                    dest = pCallInfo->pNextInstrGC;
                    break;

                case PATM_CURINSTRADDR:
                    Assert(pCallInfo);
                    dest = pCallInfo->pCurInstrGC;
                    break;

                case PATM_VM_FORCEDACTIONS:
                    dest = pVM->pVMGC + RT_OFFSETOF(VM, fForcedActions);
                    break;

                case PATM_TEMP_EAX:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
                    break;
                case PATM_TEMP_ECX:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
                    break;
                case PATM_TEMP_EDI:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
                    break;
                case PATM_TEMP_EFLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
                    break;
                case PATM_TEMP_RESTORE_FLAGS:
                    dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
                    break;

                /* Relative address of global patm lookup and call function. */
                case PATM_LOOKUP_AND_CALL_FUNCTION:
                {
                    RTGCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR)(&pPB[j] + sizeof(RTGCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperCallGC);
                    Assert(sizeof(uint32_t) == sizeof(RTGCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
                    break;
                }

                case PATM_RETURN_FUNCTION:
                {
                    RTGCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR)(&pPB[j] + sizeof(RTGCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperRetGC);
                    Assert(sizeof(uint32_t) == sizeof(RTGCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
                    break;
                }

                case PATM_IRET_FUNCTION:
                {
                    RTGCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR)(&pPB[j] + sizeof(RTGCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperIretGC);
                    Assert(sizeof(uint32_t) == sizeof(RTGCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
                    break;
                }

                case PATM_LOOKUP_AND_JUMP_FUNCTION:
                {
                    RTGCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR)(&pPB[j] + sizeof(RTGCPTR) - pVM->patm.s.pPatchMemHC);
                    Assert(pVM->patm.s.pfnHelperJumpGC);
                    Assert(sizeof(uint32_t) == sizeof(RTGCPTR));

                    /* Relative value is target minus address of instruction after the actual call instruction. */
                    dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
                    break;
                }

                default:
                    dest = PATM_ILLEGAL_DESTINATION;
                    AssertRelease(0);
                    break;
                }

                *(RTGCPTR *)&pPB[j] = dest;
                if (pAsmRecord->uReloc[i] < PATM_NO_FIXUP)
                {
                    patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE);
                }
                break;
            }
        }
        Assert(j < pAsmRecord->size);
    }
    Assert(pAsmRecord->uReloc[i] == 0xffffffff);

    /* Add the jump back to guest code (if required) */
    if (fGenJump)
    {
        int32_t displ = pReturnAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32);

        /* Add lookup record for patch to guest address translation */
        Assert(pPB[pAsmRecord->offJump - 1] == 0xE9);
        patmr3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);

        *(uint32_t *)&pPB[pAsmRecord->offJump] = displ;
        patmPatchAddReloc32(pVM, pPatch, &pPB[pAsmRecord->offJump], FIXUP_REL_JMPTOGUEST,
                            PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32,
                            pReturnAddrGC);
    }

    // Calculate the right size of this patch block
    if ((fGenJump && pAsmRecord->offJump) || (!fGenJump && !pAsmRecord->offJump))
    {
        return pAsmRecord->size;
    }
    else
    {
        // if a jump instruction is present and we don't want one, then subtract SIZEOF_NEARJUMP32
        return pAsmRecord->size - SIZEOF_NEARJUMP32;
    }
}

/* Read bytes and check for overwritten instructions; if the source was patched
 * before, PATMR3QueryOpcode returns the original instruction bytes rather than
 * the patch jump that now sits in guest memory. */
static int patmPatchReadBytes(PVM pVM, uint8_t *pDest, RTGCPTR pSrc, uint32_t cb)
{
    int rc = PGMPhysReadGCPtr(pVM, pDest, pSrc, cb);
    AssertRCReturn(rc, rc);
    /*
     * Could be patched already; make sure this is checked!
     */
    for (uint32_t i = 0; i < cb; i++)
    {
        uint8_t temp;

        int rc2 = PATMR3QueryOpcode(pVM, pSrc+i, &temp);
        if (VBOX_SUCCESS(rc2))
        {
            pDest[i] = temp;
        }
        else
            break;  /* no more */
    }
    return VINF_SUCCESS;
}

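/**
 * Duplicate the original instruction bytes into the patch block.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state of the instruction to copy
 * @param   pCurInstrGC Guest address of the instruction
 */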
int patmPatchGenDuplicate(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, GCPTRTYPE(uint8_t *) pCurInstrGC)
{
    int rc = VINF_SUCCESS;
    PATCHGEN_PROLOG(pVM, pPatch);

    rc = patmPatchReadBytes(pVM, pPB, pCurInstrGC, pCpu->opsize);
    AssertRC(rc);
    PATCHGEN_EPILOG(pPatch, pCpu->opsize);
    return rc;
}

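/**
 * Generate an iret patch (emits the PATMIretRecord template).
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pCurInstrGC   Guest address of the iret instruction
 * @param   fSizeOverride Operand size override prefix (asserted to be absent)
 */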
int patmPatchGenIret(PVM pVM, PPATCHINFO pPatch, RTGCPTR pCurInstrGC, bool fSizeOverride)
{
    uint32_t size;
    PATMCALLINFO callInfo;

    PATCHGEN_PROLOG(pVM, pPatch);

    AssertMsg(fSizeOverride == false, ("operand size override!!\n"));

    callInfo.pCurInstrGC = pCurInstrGC;

    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

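/**
 * Generate a cli patch (emits the PATMCliRecord template).
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch record
 */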
int patmPatchGenCli(PVM pVM, PPATCHINFO pPatch)
{
    uint32_t size;
    PATCHGEN_PROLOG(pVM, pPatch);

    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCliRecord, 0, false);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

/*
 * Generate an STI patch
 */
int patmPatchGenSti(PVM pVM, PPATCHINFO pPatch, RTGCPTR pCurInstrGC, RTGCPTR pNextInstrGC)
{
    PATMCALLINFO callInfo;
    uint32_t size;

    Log(("patmPatchGenSti at %VGv; next %VGv\n", pCurInstrGC, pNextInstrGC));
    PATCHGEN_PROLOG(pVM, pPatch);
    callInfo.pNextInstrGC = pNextInstrGC;
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMStiRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}

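/**
 * Generate a popf patch; the 16 bits template is chosen when an operand size
 * override is present.
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pReturnAddrGC Guest address of the instruction following popf
 * @param   fSizeOverride Operand size override prefix present
 * @param   fGenJumpBack  Generate a jump back to guest code afterwards
 */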
int patmPatchGenPopf(PVM pVM, PPATCHINFO pPatch, GCPTRTYPE(uint8_t *)pReturnAddrGC, bool fSizeOverride, bool fGenJumpBack)
{
    uint32_t size;
    PATMCALLINFO callInfo;

    PATCHGEN_PROLOG(pVM, pPatch);

    callInfo.pNextInstrGC = pReturnAddrGC;

    Log(("patmPatchGenPopf at %VGv\n", pReturnAddrGC));

    /** @todo check if we mess up IOPL here (theoretical possibility afaik) */
    if (fSizeOverride == true)
    {
        Log(("operand size override!!\n"));
        size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf16Record : &PATMPopf16Record_NoExit, pReturnAddrGC, fGenJumpBack, &callInfo);
    }
    else
    {
        size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf32Record : &PATMPopf32Record_NoExit, pReturnAddrGC, fGenJumpBack, &callInfo);
    }

    PATCHGEN_EPILOG(pPatch, size);
    STAM_COUNTER_INC(&pVM->patm.s.StatGenPopf);
    return VINF_SUCCESS;
}

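/**
 * Generate a pushf patch; the 16 bits template is chosen when an operand size
 * override is present.
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   fSizeOverride Operand size override prefix present
 */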
int patmPatchGenPushf(PVM pVM, PPATCHINFO pPatch, bool fSizeOverride)
{
    uint32_t size;
    PATCHGEN_PROLOG(pVM, pPatch);

    if (fSizeOverride == true)
    {
        Log(("operand size override!!\n"));
        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushf16Record, 0, false);
    }
    else
    {
        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushf32Record, 0, false);
    }

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

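/**
 * Generate a push cs patch (emits the PATMPushCSRecord template).
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch record
 */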
int patmPatchGenPushCS(PVM pVM, PPATCHINFO pPatch)
{
    uint32_t size;
    PATCHGEN_PROLOG(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMPushCSRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

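/**
 * Generate a loop/loopz/loopnz/jecxz patch; the relative jump target is
 * recorded with patmPatchAddJump and filled in later (see
 * patmr3SetBranchTargets).
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pTargetGC     Guest address of the jump target
 * @param   opcode        OP_LOOP, OP_LOOPNE, OP_LOOPE or OP_JECXZ
 * @param   fSizeOverride Operand size override prefix present (ecx -> cx)
 */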
int patmPatchGenLoop(PVM pVM, PPATCHINFO pPatch, GCPTRTYPE(uint8_t *)pTargetGC, uint32_t opcode, bool fSizeOverride)
{
    uint32_t size = 0;
    PPATCHASMRECORD pPatchAsmRec;

    PATCHGEN_PROLOG(pVM, pPatch);

    switch (opcode)
    {
    case OP_LOOP:
        pPatchAsmRec = &PATMLoopRecord;
        break;
    case OP_LOOPNE:
        pPatchAsmRec = &PATMLoopNZRecord;
        break;
    case OP_LOOPE:
        pPatchAsmRec = &PATMLoopZRecord;
        break;
    case OP_JECXZ:
        pPatchAsmRec = &PATMJEcxRecord;
        break;
    default:
        AssertMsgFailed(("PatchGenLoop: invalid opcode %d\n", opcode));
        return VERR_INVALID_PARAMETER;
    }
    Assert(pPatchAsmRec->offSizeOverride && pPatchAsmRec->offRelJump);

    Log(("PatchGenLoop %d jump %d to %08x offrel=%d\n", opcode, pPatch->nrJumpRecs, pTargetGC, pPatchAsmRec->offRelJump));

    // Generate the patch code
    size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);

    if (fSizeOverride)
    {
        pPB[pPatchAsmRec->offSizeOverride] = 0x66;  // ecx -> cx or vice versa
    }

    *(RTGCPTR *)&pPB[pPatchAsmRec->offRelJump] = 0xDEADBEEF;

    patmPatchAddJump(pVM, pPatch, &pPB[pPatchAsmRec->offRelJump - 1], 1, pTargetGC, opcode);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

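/**
 * Generate a relative (conditional) jump; the displacement is recorded with
 * patmPatchAddJump and resolved once the branch target is known.
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pTargetGC     Guest address of the jump target
 * @param   opcode        Jump opcode (OP_JMP, the conditional jumps or the loop variants)
 * @param   fSizeOverride Operand size override prefix present
 */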
int patmPatchGenRelJump(PVM pVM, PPATCHINFO pPatch, GCPTRTYPE(uint8_t *)pTargetGC, uint32_t opcode, bool fSizeOverride)
{
    uint32_t offset = 0;
    PATCHGEN_PROLOG(pVM, pPatch);

    // internal relative jumps from patch code to patch code; no relocation record required

    Assert(PATMIsPatchGCAddr(pVM, pTargetGC) == false);

    switch (opcode)
    {
    case OP_JO:
        pPB[1] = 0x80;
        break;
    case OP_JNO:
        pPB[1] = 0x81;
        break;
    case OP_JC:
        pPB[1] = 0x82;
        break;
    case OP_JNC:
        pPB[1] = 0x83;
        break;
    case OP_JE:
        pPB[1] = 0x84;
        break;
    case OP_JNE:
        pPB[1] = 0x85;
        break;
    case OP_JBE:
        pPB[1] = 0x86;
        break;
    case OP_JNBE:
        pPB[1] = 0x87;
        break;
    case OP_JS:
        pPB[1] = 0x88;
        break;
    case OP_JNS:
        pPB[1] = 0x89;
        break;
    case OP_JP:
        pPB[1] = 0x8A;
        break;
    case OP_JNP:
        pPB[1] = 0x8B;
        break;
    case OP_JL:
        pPB[1] = 0x8C;
        break;
    case OP_JNL:
        pPB[1] = 0x8D;
        break;
    case OP_JLE:
        pPB[1] = 0x8E;
        break;
    case OP_JNLE:
        pPB[1] = 0x8F;
        break;

    case OP_JMP:
        /* If interrupted here, then jump to the target instruction. Used by PATM.cpp for jumping to known instructions. */
        /* Add lookup record for patch to guest address translation */
        patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pTargetGC, PATM_LOOKUP_PATCH2GUEST);

        pPB[0] = 0xE9;
        break;

    case OP_JECXZ:
    case OP_LOOP:
    case OP_LOOPNE:
    case OP_LOOPE:
        return patmPatchGenLoop(pVM, pPatch, pTargetGC, opcode, fSizeOverride);

    default:
        AssertMsg(0, ("Invalid jump opcode %d\n", opcode));
        return VERR_PATCHING_REFUSED;
    }
    if (opcode != OP_JMP)
    {
        pPB[0] = 0xF;
        offset += 2;
    }
    else
        offset++;

    *(RTGCPTR *)&pPB[offset] = 0xDEADBEEF;

    patmPatchAddJump(pVM, pPatch, pPB, offset, pTargetGC, opcode);

    offset += sizeof(RTGCPTR);

    PATCHGEN_EPILOG(pPatch, offset);
    return VINF_SUCCESS;
}

/*
 * Rewrite call to dynamic or currently unknown function (on-demand patching of function)
 */
int patmPatchGenCall(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTGCPTR pCurInstrGC, RTGCPTR pTargetGC, bool fIndirect)
{
    PATMCALLINFO callInfo;
    uint32_t offset;
    uint32_t i, size;
    int rc;

    /** @note Don't check for IF=1 here. The ret instruction will do this. */
    /** @note It's dangerous to do this for 'normal' patches; the jump target might be inside the generated patch jump. (Seen this!) */

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    if (fIndirect)
    {
        Log(("patmPatchGenIndirectCall\n"));
        Assert(pCpu->param1.size == 4);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J);

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        pPB[offset++] = 0xFF;   // push r/m32
        pPB[offset++] = MAKE_MODRM(MODRM_MOD(pCpu->ModRM), 6 /* group 5 */, MODRM_RM(pCpu->ModRM));
        i = 2;  /* standard offset of modrm bytes */
        if (pCpu->prefix & PREFIX_OPSIZE)
            i++;    //skip operand prefix
        if (pCpu->prefix & PREFIX_SEG)
            i++;    //skip segment prefix

        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTGCPTR)((RTGCUINTPTR)pCurInstrGC + i), pCpu->opsize - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->opsize - i);
    }
    else
    {
        AssertMsg(PATMIsPatchGCAddr(pVM, pTargetGC) == false, ("Target is already a patch address (%VGv)?!?\n", pTargetGC));
        Assert(pTargetGC);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J);

        /** @todo wasting memory as the complex search is overkill and we need only one lookup slot... */

        /* Relative call to patch code (patch to patch -> no fixup). */
        Log(("PatchGenCall from %VGv (next=%VGv) to %VGv\n", pCurInstrGC, pCurInstrGC + pCpu->opsize, pTargetGC));

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        pPB[offset++] = 0x68;   // push %Iv
        *(RTGCPTR *)&pPB[offset] = pTargetGC;
        offset += sizeof(RTGCPTR);
    }

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i = 0; i < size; i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->opsize;
    callInfo.pTargetGC = (fIndirect) ? 0xDEADBEEF : pTargetGC;
    size = patmPatchGenCode(pVM, pPatch, pPB, (fIndirect) ? &PATMCallIndirectRecord : &PATMCallRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    /* Need to set PATM_INTERRUPTFLAG after the patched ret returns here. */
    rc = patmPatchGenSetPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenCall);
    return VINF_SUCCESS;
}

/**
 * Generate indirect jump to unknown destination
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Current instruction address
 */
int patmPatchGenJump(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTGCPTR pCurInstrGC)
{
    PATMCALLINFO callInfo;
    uint32_t offset;
    uint32_t i, size;
    int rc;

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    Log(("patmPatchGenIndirectJump\n"));
    Assert(pCpu->param1.size == 4);
    Assert(OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J);

    /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
     * a page fault. The assembly code restores the stack afterwards.
     */
    offset = 0;
    pPB[offset++] = 0xFF;   // push r/m32
    pPB[offset++] = MAKE_MODRM(MODRM_MOD(pCpu->ModRM), 6 /* group 5 */, MODRM_RM(pCpu->ModRM));
    i = 2;  /* standard offset of modrm bytes */
    if (pCpu->prefix & PREFIX_OPSIZE)
        i++;    //skip operand prefix
    if (pCpu->prefix & PREFIX_SEG)
        i++;    //skip segment prefix

    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTGCPTR)((RTGCUINTPTR)pCurInstrGC + i), pCpu->opsize - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->opsize - i);

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i = 0; i < size; i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    callInfo.pReturnGC = pCurInstrGC + pCpu->opsize;
    callInfo.pTargetGC = 0xDEADBEEF;
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpIndirectRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenJump);
    return VINF_SUCCESS;
}

/**
 * Generate return instruction
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch structure
 * @param   pCpu        Disassembly struct
 * @param   pCurInstrGC Current instruction pointer
 *
 */
int patmPatchGenRet(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, GCPTRTYPE(uint8_t *) pCurInstrGC)
{
    int size = 0, rc;
    RTGCPTR pPatchRetInstrGC;

    /* Remember start of this patch for below. */
    pPatchRetInstrGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;

    Log(("patmPatchGenRet %VGv\n", pCurInstrGC));

    /** @note optimization: multiple identical ret instructions in a single patch can share a single patched ret. */
    if (    pPatch->pTempInfo->pPatchRetInstrGC
        &&  pPatch->pTempInfo->uPatchRetParam1 == (uint32_t)pCpu->param1.parval) /* nr of bytes popped off the stack should be identical of course! */
    {
        Assert(pCpu->pCurInstr->opcode == OP_RETN);
        STAM_COUNTER_INC(&pVM->patm.s.StatGenRetReused);

        return patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, pPatch->pTempInfo->pPatchRetInstrGC);
    }

    /* Jump back to the original instruction if IF is set again. */
    Assert(!PATMFindActivePatchByEntrypoint(pVM, pCurInstrGC));
    rc = patmPatchGenCheckIF(pVM, pPatch, pCurInstrGC);
    AssertRCReturn(rc, rc);

    /* align this block properly to make sure the jump table will not be misaligned. */
    PATCHGEN_PROLOG(pVM, pPatch);
    size = (RTHCUINTPTR)pPB & 3;
    if (size)
        size = 4 - size;

    for (int i = 0; i < size; i++)
        pPB[i] = 0x90;  /* nop */
    PATCHGEN_EPILOG(pPatch, size);

    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMRetRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenRet);
    /* Duplicate the ret or ret n instruction; it will use the PATM return address */
    rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);

    if (rc == VINF_SUCCESS)
    {
        pPatch->pTempInfo->pPatchRetInstrGC = pPatchRetInstrGC;
        pPatch->pTempInfo->uPatchRetParam1  = pCpu->param1.parval;
    }
    return rc;
}

/**
 * Generate all global patm functions
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch structure
 *
 */
int patmPatchGenGlobalFunctions(PVM pVM, PPATCHINFO pPatch)
{
    int size = 0;

    pVM->patm.s.pfnHelperCallGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMLookupAndCallRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    pVM->patm.s.pfnHelperRetGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMRetFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    pVM->patm.s.pfnHelperJumpGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMLookupAndJumpRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    pVM->patm.s.pfnHelperIretGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch);
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    Log(("pfnHelperCallGC %VGv\n", pVM->patm.s.pfnHelperCallGC));
    Log(("pfnHelperRetGC  %VGv\n", pVM->patm.s.pfnHelperRetGC));
    Log(("pfnHelperJumpGC %VGv\n", pVM->patm.s.pfnHelperJumpGC));
    Log(("pfnHelperIretGC %VGv\n", pVM->patm.s.pfnHelperIretGC));

    return VINF_SUCCESS;
}

/**
 * Generate illegal instruction (int 3)
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pPatch  Patch structure
 *
 */
int patmPatchGenIllegalInstr(PVM pVM, PPATCHINFO pPatch)
{
    PATCHGEN_PROLOG(pVM, pPatch);

    pPB[0] = 0xCC;

    PATCHGEN_EPILOG(pPatch, 1);
    return VINF_SUCCESS;
}

/**
 * Check virtual IF flag and jump back to original guest code if set
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch structure
 * @param   pCurInstrGC Guest context pointer to the current instruction
 *
 */
int patmPatchGenCheckIF(PVM pVM, PPATCHINFO pPatch, RTGCPTR pCurInstrGC)
{
    uint32_t size;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate code to check for IF=1 before executing the call to the duplicated function. */
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCheckIFRecord, pCurInstrGC, true);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

/**
 * Set PATM interrupt flag
 *
 * @returns VBox status code.
 * @param   pVM      The VM to operate on.
 * @param   pPatch   Patch structure
 * @param   pInstrGC Corresponding guest instruction
 *
 */
int patmPatchGenSetPIF(PVM pVM, PPATCHINFO pPatch, RTGCPTR pInstrGC)
{
    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);

    int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMSetPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

/**
 * Clear PATM interrupt flag
 *
 * @returns VBox status code.
 * @param   pVM      The VM to operate on.
 * @param   pPatch   Patch structure
 * @param   pInstrGC Corresponding guest instruction
 *
 */
int patmPatchGenClearPIF(PVM pVM, PPATCHINFO pPatch, RTGCPTR pInstrGC)
{
    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);

    int size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}


/**
 * Clear PATM inhibit irq flag
 *
 * @returns VBox status code.
 * @param   pVM          The VM to operate on.
 * @param   pPatch       Patch structure
 * @param   pNextInstrGC Next guest instruction
 */
int patmPatchGenClearInhibitIRQ(PVM pVM, PPATCHINFO pPatch, RTGCPTR pNextInstrGC)
{
    int size;
    PATMCALLINFO callInfo;

    PATCHGEN_PROLOG(pVM, pPatch);

    Assert((pPatch->flags & (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION)) != (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION));

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pNextInstrGC, PATM_LOOKUP_PATCH2GUEST);

    callInfo.pNextInstrGC = pNextInstrGC;

    if (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearInhibitIRQContIF0Record, 0, false, &callInfo);
    else
        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMClearInhibitIRQFaultIF0Record, 0, false, &callInfo);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

/**
 * Generate an interrupt handler entrypoint
 *
 * @returns VBox status code.
 * @param   pVM           The VM to operate on.
 * @param   pPatch        Patch record
 * @param   pIntHandlerGC IDT handler address
 *
 ** @todo must check if virtual IF is already cleared on entry!!!!!!!!!!!!!!!!!!!!!!!
 */
int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTGCPTR pIntHandlerGC)
{
    uint32_t size;
    int rc = VINF_SUCCESS;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
    size = patmPatchGenCode(pVM, pPatch, pPB,
                            (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
                            0, false);

    PATCHGEN_EPILOG(pPatch, size);

    // Interrupt gates set IF to 0
    rc = patmPatchGenCli(pVM, pPatch);
    AssertRCReturn(rc, rc);

    return rc;
}

/**
 * Generate a trap handler entrypoint
 *
 * @returns VBox status code.
 * @param   pVM            The VM to operate on.
 * @param   pPatch         Patch record
 * @param   pTrapHandlerGC IDT handler address
 */
int patmPatchGenTrapEntry(PVM pVM, PPATCHINFO pPatch, RTGCPTR pTrapHandlerGC)
{
    uint32_t size;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pTrapHandlerGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate entrypoint for the trap handler (correcting CS in the interrupt stack frame) */
    size = patmPatchGenCode(pVM, pPatch, pPB,
                            (pPatch->flags & PATMFL_TRAPHANDLER_WITH_ERRORCODE) ? &PATMTrapEntryRecordErrorCode : &PATMTrapEntryRecord,
                            pTrapHandlerGC, true);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}

#ifdef VBOX_WITH_STATISTICS
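/**
 * Generate code to update the call statistics of this patch (statistics
 * builds only).
 *
 * @returns VBox status code.
 * @param   pVM      The VM to operate on.
 * @param   pPatch   Patch record
 * @param   pInstrGC Corresponding guest instruction
 */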
int patmPatchGenStats(PVM pVM, PPATCHINFO pPatch, RTGCPTR pInstrGC)
{
    uint32_t size;

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for stats code -> guest handler. */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate code to keep calling statistics for this patch */
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMStatsRecord, pInstrGC, false);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}
#endif

/**
 * Debug register moves to or from general purpose registers
 * mov GPR, DRx
 * mov DRx, GPR
 *
 * @todo if we ever want to support hardware debug registers natively, then
 *       this will need to be changed!
 */
int patmPatchGenMovDebug(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
{
    int rc = VINF_SUCCESS;
    int reg, mod, rm, dbgreg;
    uint32_t offset;

    PATCHGEN_PROLOG(pVM, pPatch);

    mod = 0;    //effective address (only)
    rm  = 5;    //disp32
    if (pCpu->pCurInstr->param1 == OP_PARM_Dd)
    {
        Assert(0);  // You should never get here. Illegal!

        // mov DRx, GPR
        pPB[0] = 0x89;  //mov disp32, GPR
        Assert(pCpu->param1.flags & USE_REG_DBG);
        Assert(pCpu->param2.flags & USE_REG_GEN32);

        dbgreg = pCpu->param1.base.reg_dbg;
        reg    = pCpu->param2.base.reg_gen32;
    }
    else
    {
        // mov GPR, DRx
        Assert(pCpu->param1.flags & USE_REG_GEN32);
        Assert(pCpu->param2.flags & USE_REG_DBG);

        pPB[0] = 0x8B;  // mov GPR, disp32
        reg    = pCpu->param1.base.reg_gen32;
        dbgreg = pCpu->param2.base.reg_dbg;
    }

    pPB[1] = MAKE_MODRM(mod, reg, rm);

    /// @todo make this an array in the context structure
    switch (dbgreg)
    {
    case USE_REG_DR0:
        offset = RT_OFFSETOF(CPUMCTX, dr0);
        break;
    case USE_REG_DR1:
        offset = RT_OFFSETOF(CPUMCTX, dr1);
        break;
    case USE_REG_DR2:
        offset = RT_OFFSETOF(CPUMCTX, dr2);
        break;
    case USE_REG_DR3:
        offset = RT_OFFSETOF(CPUMCTX, dr3);
        break;
    case USE_REG_DR4:
        offset = RT_OFFSETOF(CPUMCTX, dr4);
        break;
    case USE_REG_DR5:
        offset = RT_OFFSETOF(CPUMCTX, dr5);
        break;
    case USE_REG_DR6:
        offset = RT_OFFSETOF(CPUMCTX, dr6);
        break;
    case USE_REG_DR7:
        offset = RT_OFFSETOF(CPUMCTX, dr7);
        break;
    default: /* Shut up compiler warning. */
        AssertFailed();
        offset = 0;
        break;
    }
    *(RTGCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
    patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);

    PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTGCPTR));
    return rc;
}

/*
 * Control register moves to or from general purpose registers
 * mov GPR, CRx
 * mov CRx, GPR
 */
int patmPatchGenMovControl(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
{
    int rc = VINF_SUCCESS;
    int reg, mod, rm, ctrlreg;
    uint32_t offset;

    PATCHGEN_PROLOG(pVM, pPatch);

    mod = 0;    //effective address (only)
    rm  = 5;    //disp32
    if (pCpu->pCurInstr->param1 == OP_PARM_Cd)
    {
        Assert(0);  // You should never get here. Illegal!

        // mov CRx, GPR
        pPB[0] = 0x89;  //mov disp32, GPR
        ctrlreg = pCpu->param1.base.reg_ctrl;
        reg     = pCpu->param2.base.reg_gen32;
        Assert(pCpu->param1.flags & USE_REG_CR);
        Assert(pCpu->param2.flags & USE_REG_GEN32);
    }
    else
    {
        // mov GPR, CRx
        Assert(pCpu->param1.flags & USE_REG_GEN32);
        Assert(pCpu->param2.flags & USE_REG_CR);

        pPB[0] = 0x8B;  // mov GPR, disp32
        reg     = pCpu->param1.base.reg_gen32;
        ctrlreg = pCpu->param2.base.reg_ctrl;
    }

    pPB[1] = MAKE_MODRM(mod, reg, rm);

    /// @todo make this an array in the context structure
    switch (ctrlreg)
    {
    case USE_REG_CR0:
        offset = RT_OFFSETOF(CPUMCTX, cr0);
        break;
    case USE_REG_CR2:
        offset = RT_OFFSETOF(CPUMCTX, cr2);
        break;
    case USE_REG_CR3:
        offset = RT_OFFSETOF(CPUMCTX, cr3);
        break;
    case USE_REG_CR4:
        offset = RT_OFFSETOF(CPUMCTX, cr4);
        break;
    default: /* Shut up compiler warning. */
        AssertFailed();
        offset = 0;
        break;
    }
    *(RTGCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
    patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);

    PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTGCPTR));
    return rc;
}


/**
 * Generate an sldt or str patch instruction
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenSldtStr(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTGCPTR pCurInstrGC)
{
    // sldt %Ew
    int rc = VINF_SUCCESS;
    uint32_t offset = 0;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->prefix == PREFIX_NONE || pCpu->prefix == PREFIX_OPSIZE);

    PATCHGEN_PROLOG(pVM, pPatch);

    if (pCpu->param1.flags == USE_REG_GEN32 || pCpu->param1.flags == USE_REG_GEN16)
    {
        /* Register operand */
        // 8B 15 [32 bits addr]   mov edx, CPUMCTX.tr/ldtr

        if (pCpu->prefix == PREFIX_OPSIZE)
            pPB[offset++] = 0x66;

        pPB[offset++] = 0x8B;   // mov destreg, CPUMCTX.tr/ldtr
        /* Modify REG part according to destination of original instruction */
        pPB[offset++] = MAKE_MODRM(0, pCpu->param1.base.reg_gen32, 5);
        if (pCpu->pCurInstr->opcode == OP_STR)
        {
            *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTGCPTR);
    }
    else
    {
        /* Memory operand */
        //50                   push    eax
        //52                   push    edx
        //8D 15 48 7C 42 00    lea     edx, dword ptr [dest]
        //66 A1 48 7C 42 00    mov     ax, CPUMCTX.tr/ldtr
        //66 89 02             mov     word ptr [edx], ax
        //5A                   pop     edx
        //58                   pop     eax

        pPB[offset++] = 0x50;   // push eax
        pPB[offset++] = 0x52;   // push edx

        if (pCpu->prefix == PREFIX_SEG)
        {
            /** @todo untested */
            pPB[offset++] = pCpu->prefix_seg;
        }
        pPB[offset++] = 0x8D;   // lea edx, dword ptr [dest]
        // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
        pPB[offset++] = MAKE_MODRM(MODRM_MOD(pCpu->ModRM), USE_REG_EDX, MODRM_RM(pCpu->ModRM));

        i = 3;  /* standard offset of modrm bytes */
        if (pCpu->prefix == PREFIX_OPSIZE)
            i++;    //skip operand prefix
        if (pCpu->prefix == PREFIX_SEG)
            i++;    //skip segment prefix

        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTGCPTR)((RTGCUINTPTR)pCurInstrGC + i), pCpu->opsize - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->opsize - i);

        pPB[offset++] = 0x66;   // mov ax, CPUMCTX.tr/ldtr
        pPB[offset++] = 0xA1;
        if (pCpu->pCurInstr->opcode == OP_STR)
        {
            *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTGCPTR);

        pPB[offset++] = 0x66;   // mov word ptr [edx], ax
        pPB[offset++] = 0x89;
        pPB[offset++] = 0x02;

        pPB[offset++] = 0x5A;   // pop edx
        pPB[offset++] = 0x58;   // pop eax
    }

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}

/**
 * Generate an sgdt or sidt patch instruction
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenSxDT(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTGCPTR pCurInstrGC)
{
    int rc = VINF_SUCCESS;
    uint32_t offset = 0, offset_base, offset_limit;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->prefix == PREFIX_NONE);

    // sgdt %Ms
    // sidt %Ms

    switch (pCpu->pCurInstr->opcode)
    {
    case OP_SGDT:
        offset_base  = RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
        offset_limit = RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
        break;

    case OP_SIDT:
        offset_base  = RT_OFFSETOF(CPUMCTX, idtr.pIdt);
        offset_limit = RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
        break;

    default:
        return VERR_INVALID_PARAMETER;
    }

    //50                   push    eax
    //52                   push    edx
    //8D 15 48 7C 42 00    lea     edx, dword ptr [dest]
    //66 A1 48 7C 42 00    mov     ax, CPUMCTX.gdtr.limit
    //66 89 02             mov     word ptr [edx], ax
    //A1 48 7C 42 00       mov     eax, CPUMCTX.gdtr.base
    //89 42 02             mov     dword ptr [edx+2], eax
    //5A                   pop     edx
    //58                   pop     eax

    PATCHGEN_PROLOG(pVM, pPatch);
    pPB[offset++] = 0x50;   // push eax
    pPB[offset++] = 0x52;   // push edx

    if (pCpu->prefix == PREFIX_SEG)
    {
        /** @todo untested */
        pPB[offset++] = pCpu->prefix_seg;
    }
    pPB[offset++] = 0x8D;   // lea edx, dword ptr [dest]
    // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
    pPB[offset++] = MAKE_MODRM(MODRM_MOD(pCpu->ModRM), USE_REG_EDX, MODRM_RM(pCpu->ModRM));

    i = 3;  /* standard offset of modrm bytes */
    if (pCpu->prefix == PREFIX_OPSIZE)
        i++;    //skip operand prefix
    if (pCpu->prefix == PREFIX_SEG)
        i++;    //skip segment prefix
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTGCPTR)((RTGCUINTPTR)pCurInstrGC + i), pCpu->opsize - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->opsize - i);

    pPB[offset++] = 0x66;   // mov ax, CPUMCTX.gdtr.limit
    pPB[offset++] = 0xA1;
    *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_limit;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTGCPTR);

    pPB[offset++] = 0x66;   // mov word ptr [edx], ax
    pPB[offset++] = 0x89;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0xA1;   // mov eax, CPUMCTX.gdtr.base
    *(RTGCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_base;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTGCPTR);

    pPB[offset++] = 0x89;   // mov dword ptr [edx+2], eax
    pPB[offset++] = 0x42;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0x5A;   // pop edx
    pPB[offset++] = 0x58;   // pop eax

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}

/**
 * Generate a cpuid patch instruction
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pPatch      Patch record
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenCpuid(PVM pVM, PPATCHINFO pPatch, RTGCPTR pCurInstrGC)
{
    uint32_t size;
    PATCHGEN_PROLOG(pVM, pPatch);

    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMCpuidRecord, 0, false);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}

/**
 * Generate the jump from guest to patch code
 *
 * @returns VBox status code.
 * @param   pVM               The VM to operate on.
 * @param   pPatch            Patch record
 * @param   pReturnAddrGC     Guest code target of the jump
 * @param   fClearInhibitIRQs Clear inhibit irq flag
 */
int patmPatchGenJumpToGuest(PVM pVM, PPATCHINFO pPatch, GCPTRTYPE(uint8_t *)pReturnAddrGC, bool fClearInhibitIRQs)
{
    int rc = VINF_SUCCESS;
    uint32_t size;

    if (fClearInhibitIRQs)
    {
        rc = patmPatchGenClearInhibitIRQ(pVM, pPatch, pReturnAddrGC);
        if (rc == VERR_NO_MEMORY)
            return rc;
        AssertRCReturn(rc, rc);
    }

    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Generate code to jump to guest code if IF=1, else fault. */
    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpToGuest_IF1Record, pReturnAddrGC, true);
    PATCHGEN_EPILOG(pPatch, size);

    return rc;
}

/*
 * Relative jump from patch code to patch code (no fixup required)
 */
int patmPatchGenPatchJump(PVM pVM, PPATCHINFO pPatch, RTGCPTR pCurInstrGC, GCPTRTYPE(uint8_t *)pPatchAddrGC)
{
    int32_t displ;
    int rc = VINF_SUCCESS;

    Assert(PATMIsPatchGCAddr(pVM, pPatchAddrGC));
    PATCHGEN_PROLOG(pVM, pPatch);

    /* Add lookup record for patch to guest address translation */
    patmr3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);

    pPB[0] = 0xE9;  //JMP

    displ = pPatchAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + SIZEOF_NEARJUMP32);

    *(uint32_t *)&pPB[1] = displ;

    PATCHGEN_EPILOG(pPatch, SIZEOF_NEARJUMP32);

    return rc;
}