VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CSAM.cpp@51574

Last change on this file since 51574 was 50575, checked in by vboxsync, 11 years ago

VMM: Added SSMR3RegisterStub and used it to provide saved state load-exec stubs for ignoring CSAM and PATM state when VBOX_WITH_RAW_MODE isn't defined or when HM is active.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 100.0 KB
1/* $Id: CSAM.cpp 50575 2014-02-25 13:07:16Z vboxsync $ */
2/** @file
3 * CSAM - Guest OS Code Scanning and Analysis Manager
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_CSAM
22#include <VBox/vmm/cpum.h>
23#include <VBox/vmm/stam.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/csam.h>
26#include <VBox/vmm/cpumdis.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/vmm/mm.h>
30#include <VBox/vmm/em.h>
31#include <VBox/vmm/hm.h>
32#ifdef VBOX_WITH_REM
33# include <VBox/vmm/rem.h>
34#endif
35#include <VBox/vmm/selm.h>
36#include <VBox/vmm/trpm.h>
37#include <VBox/vmm/cfgm.h>
38#include <VBox/vmm/ssm.h>
39#include <VBox/param.h>
40#include <iprt/avl.h>
41#include <iprt/asm.h>
42#include <iprt/thread.h>
43#include "CSAMInternal.h"
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/uvm.h>
46
47#include <VBox/dbg.h>
48#include <VBox/sup.h>
49#include <VBox/err.h>
50#include <VBox/log.h>
51
52#include <VBox/dis.h>
53#include <VBox/disopcode.h>
54#include <iprt/assert.h>
55#include <iprt/string.h>
56#include "internal/pgm.h"
57
58
59/* Enabled by default */
60#define CSAM_ENABLE
61
62/* Enable to monitor code pages for self-modifying code. */
63#define CSAM_MONITOR_CODE_PAGES
64/* Enable to monitor all scanned pages
65#define CSAM_MONITOR_CSAM_CODE_PAGES */
66/* Enable to scan beyond ret instructions.
67#define CSAM_ANALYSE_BEYOND_RET */
68
69/*******************************************************************************
70* Internal Functions *
71*******************************************************************************/
72static DECLCALLBACK(int) csamr3Save(PVM pVM, PSSMHANDLE pSSM);
73static DECLCALLBACK(int) csamr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
74static DECLCALLBACK(int) CSAMCodePageWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
75static DECLCALLBACK(int) CSAMCodePageInvalidate(PVM pVM, RTGCPTR GCPtr);
76
77bool csamIsCodeScanned(PVM pVM, RTRCPTR pInstr, PCSAMPAGE *pPage);
78int csamR3CheckPageRecord(PVM pVM, RTRCPTR pInstr);
79static PCSAMPAGE csamCreatePageRecord(PVM pVM, RTRCPTR GCPtr, CSAMTAG enmTag, bool fCode32, bool fMonitorInvalidation = false);
80static int csamRemovePageRecord(PVM pVM, RTRCPTR GCPtr);
81static int csamReinit(PVM pVM);
82static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t opsize, bool fScanned);
83static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
84 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec);
85
86/** @todo Temporary for debugging. */
87static bool fInCSAMCodePageInvalidate = false;
88
89#ifdef VBOX_WITH_DEBUGGER
90static FNDBGCCMD csamr3CmdOn;
91static FNDBGCCMD csamr3CmdOff;
92#endif
93
94
95/*******************************************************************************
96* Global Variables *
97*******************************************************************************/
98#ifdef VBOX_WITH_DEBUGGER
99/** Command descriptors. */
100static const DBGCCMD g_aCmds[] =
101{
102 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, ..., pszDescription */
103 { "csamon", 0, 0, NULL, 0, 0, csamr3CmdOn, "", "Enable CSAM code scanning." },
104 { "csamoff", 0, 0, NULL, 0, 0, csamr3CmdOff, "", "Disable CSAM code scanning." },
105};
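/* Usage sketch (assuming the usual VirtualBox debugger console, e.g. VBoxDbg, is
 * attached to the VM): the commands registered above are typed at the debugger
 * prompt and take no arguments:
 *
 *     csamoff      - disable CSAM code scanning for the running VM
 *     csamon       - re-enable CSAM code scanning
 */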
106#endif
107
108/**
109 * SSM descriptor table for the CSAM structure.
110 */
111static const SSMFIELD g_aCsamFields[] =
112{
113 /** @todo there are more fields that can be ignored here. */
114 SSMFIELD_ENTRY_IGNORE( CSAM, offVM),
115 SSMFIELD_ENTRY_PAD_HC64( CSAM, Alignment0, sizeof(uint32_t)),
116 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPageTree),
117 SSMFIELD_ENTRY( CSAM, aDangerousInstr),
118 SSMFIELD_ENTRY( CSAM, cDangerousInstr),
119 SSMFIELD_ENTRY( CSAM, iDangerousInstr),
120 SSMFIELD_ENTRY_RCPTR( CSAM, pPDBitmapGC), /// @todo ignore this?
121 SSMFIELD_ENTRY_RCPTR( CSAM, pPDHCBitmapGC), /// @todo ignore this?
122 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDBitmapHC),
123 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDGCBitmapHC),
124 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, savedstate.pSSM),
125 SSMFIELD_ENTRY( CSAM, savedstate.cPageRecords),
126 SSMFIELD_ENTRY( CSAM, savedstate.cPatchPageRecords),
127 SSMFIELD_ENTRY( CSAM, cDirtyPages),
128 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyBasePage),
129 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyFaultPage),
130 SSMFIELD_ENTRY( CSAM, cPossibleCodePages),
131 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvPossibleCodePage),
132 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvCallInstruction),
133 SSMFIELD_ENTRY( CSAM, iCallInstruction),
134 SSMFIELD_ENTRY( CSAM, fScanningStarted),
135 SSMFIELD_ENTRY( CSAM, fGatesChecked),
136 SSMFIELD_ENTRY_PAD_HC( CSAM, Alignment1, 6, 2),
137 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrTraps),
138 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPages),
139 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPagesInv),
140 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrRemovedPages),
141 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPatchPages),
142 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPHC),
143 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPGC),
144 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushes),
145 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushesSkipped),
146 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesHC),
147 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesGC),
148 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrInstr),
149 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrBytesRead),
150 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrOpcodeRead),
151 SSMFIELD_ENTRY_IGNORE( CSAM, StatTime),
152 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeCheckAddr),
153 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeAddrConv),
154 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeFlushPage),
155 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeDisasm),
156 SSMFIELD_ENTRY_IGNORE( CSAM, StatFlushDirtyPages),
157 SSMFIELD_ENTRY_IGNORE( CSAM, StatCheckGates),
158 SSMFIELD_ENTRY_IGNORE( CSAM, StatCodePageModified),
159 SSMFIELD_ENTRY_IGNORE( CSAM, StatDangerousWrite),
160 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheHit),
161 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheMiss),
162 SSMFIELD_ENTRY_IGNORE( CSAM, StatPagePATM),
163 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageCSAM),
164 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageREM),
165 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrUserPages),
166 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageMonitor),
167 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageRemoveREMFlush),
168 SSMFIELD_ENTRY_IGNORE( CSAM, StatBitmapAlloc),
169 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunction),
170 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunctionFailed),
171 SSMFIELD_ENTRY_TERM()
172};
173
174/** Fake type to simplify g_aCsamPDBitmapArray construction. */
175typedef struct
176{
177 uint8_t *a[CSAM_PGDIRBMP_CHUNKS];
178} CSAMPDBITMAPARRAY;
179
180/**
181 * SSM descriptor table for the CSAM::pPDBitmapHC array.
182 */
183static SSMFIELD const g_aCsamPDBitmapArray[] =
184{
185 SSMFIELD_ENTRY_HCPTR_NI_ARRAY(CSAMPDBITMAPARRAY, a),
186 SSMFIELD_ENTRY_TERM()
187};
188
189/**
190 * SSM descriptor table for the CSAMPAGEREC structure.
191 */
192static const SSMFIELD g_aCsamPageRecFields[] =
193{
194 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.Key),
195 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.pLeft),
196 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.pRight),
197 SSMFIELD_ENTRY_IGNORE( CSAMPAGEREC, Core.uchHeight),
198 SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 7),
199 SSMFIELD_ENTRY_RCPTR( CSAMPAGEREC, page.pPageGC),
200 SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
201 SSMFIELD_ENTRY_PAD_MSC32_AUTO( 4),
202 SSMFIELD_ENTRY_GCPHYS( CSAMPAGEREC, page.GCPhys),
203 SSMFIELD_ENTRY( CSAMPAGEREC, page.fFlags),
204 SSMFIELD_ENTRY( CSAMPAGEREC, page.uSize),
205 SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
206 SSMFIELD_ENTRY_HCPTR_NI( CSAMPAGEREC, page.pBitmap),
207 SSMFIELD_ENTRY( CSAMPAGEREC, page.fCode32),
208 SSMFIELD_ENTRY( CSAMPAGEREC, page.fMonitorActive),
209 SSMFIELD_ENTRY( CSAMPAGEREC, page.fMonitorInvalidation),
210 SSMFIELD_ENTRY_PAD_HC_AUTO( 1, 1),
211 SSMFIELD_ENTRY( CSAMPAGEREC, page.enmTag),
212 SSMFIELD_ENTRY( CSAMPAGEREC, page.u64Hash),
213 SSMFIELD_ENTRY_TERM()
214};
215
216
217/**
218 * Initializes the CSAM.
219 *
220 * @returns VBox status code.
221 * @param pVM Pointer to the VM.
222 */
223VMMR3_INT_DECL(int) CSAMR3Init(PVM pVM)
224{
225 int rc;
226
227 /*
228 * We only need a saved state dummy loader if HM is enabled.
229 */
230 if (HMIsEnabled(pVM))
231 {
232 pVM->fCSAMEnabled = false;
233 return SSMR3RegisterStub(pVM, "CSAM", 0);
234 }
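    /* Note on the stub (see the changeset comment above): SSMR3RegisterStub only
     * installs a load-exec handler that skips any "CSAM" unit found in a saved
     * state, so states created by a raw-mode configuration can still be restored
     * when hardware virtualization is active and CSAM itself is disabled. */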
235
236 /*
237 * Raw-mode.
238 */
239 LogFlow(("CSAMR3Init\n"));
240
241 /* Allocate bitmap for the page directory. */
242 rc = MMR3HyperAllocOnceNoRel(pVM, CSAM_PGDIRBMP_CHUNKS*sizeof(RTHCPTR), 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC);
243 AssertRCReturn(rc, rc);
244 rc = MMR3HyperAllocOnceNoRel(pVM, CSAM_PGDIRBMP_CHUNKS*sizeof(RTRCPTR), 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDGCBitmapHC);
245 AssertRCReturn(rc, rc);
246 pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
247 pVM->csam.s.pPDHCBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC);
248
249 rc = csamReinit(pVM);
250 AssertRCReturn(rc, rc);
251
252 /*
253 * Register save and load state notifiers.
254 */
255 rc = SSMR3RegisterInternal(pVM, "CSAM", 0, CSAM_SSM_VERSION, sizeof(pVM->csam.s) + PAGE_SIZE*16,
256 NULL, NULL, NULL,
257 NULL, csamr3Save, NULL,
258 NULL, csamr3Load, NULL);
259 AssertRCReturn(rc, rc);
260
261 STAM_REG(pVM, &pVM->csam.s.StatNrTraps, STAMTYPE_COUNTER, "/CSAM/PageTraps", STAMUNIT_OCCURENCES, "The number of CSAM page traps.");
262 STAM_REG(pVM, &pVM->csam.s.StatDangerousWrite, STAMTYPE_COUNTER, "/CSAM/DangerousWrites", STAMUNIT_OCCURENCES, "The number of dangerous writes that cause a context switch.");
263
264 STAM_REG(pVM, &pVM->csam.s.StatNrPageNPHC, STAMTYPE_COUNTER, "/CSAM/HC/PageNotPresent", STAMUNIT_OCCURENCES, "The number of CSAM pages marked not present.");
265 STAM_REG(pVM, &pVM->csam.s.StatNrPageNPGC, STAMTYPE_COUNTER, "/CSAM/GC/PageNotPresent", STAMUNIT_OCCURENCES, "The number of CSAM pages marked not present.");
266 STAM_REG(pVM, &pVM->csam.s.StatNrPages, STAMTYPE_COUNTER, "/CSAM/PageRec/AddedRW", STAMUNIT_OCCURENCES, "The number of CSAM page records (RW monitoring).");
267 STAM_REG(pVM, &pVM->csam.s.StatNrPagesInv, STAMTYPE_COUNTER, "/CSAM/PageRec/AddedRWI", STAMUNIT_OCCURENCES, "The number of CSAM page records (RW & invalidation monitoring).");
268 STAM_REG(pVM, &pVM->csam.s.StatNrRemovedPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Removed", STAMUNIT_OCCURENCES, "The number of removed CSAM page records.");
269 STAM_REG(pVM, &pVM->csam.s.StatPageRemoveREMFlush,STAMTYPE_COUNTER, "/CSAM/PageRec/Removed/REMFlush", STAMUNIT_OCCURENCES, "The number of removed CSAM page records that caused a REM flush.");
270
271 STAM_REG(pVM, &pVM->csam.s.StatNrPatchPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Patch", STAMUNIT_OCCURENCES, "The number of CSAM patch page records.");
272 STAM_REG(pVM, &pVM->csam.s.StatNrUserPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Ignore/User", STAMUNIT_OCCURENCES, "The number of CSAM user page records (ignored).");
273 STAM_REG(pVM, &pVM->csam.s.StatPagePATM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/PATM", STAMUNIT_OCCURENCES, "The number of PATM page records.");
274 STAM_REG(pVM, &pVM->csam.s.StatPageCSAM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/CSAM", STAMUNIT_OCCURENCES, "The number of CSAM page records.");
275 STAM_REG(pVM, &pVM->csam.s.StatPageREM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/REM", STAMUNIT_OCCURENCES, "The number of REM page records.");
276 STAM_REG(pVM, &pVM->csam.s.StatPageMonitor, STAMTYPE_COUNTER, "/CSAM/PageRec/Monitored", STAMUNIT_OCCURENCES, "The number of monitored pages.");
277
278 STAM_REG(pVM, &pVM->csam.s.StatCodePageModified, STAMTYPE_COUNTER, "/CSAM/Monitor/DirtyPage", STAMUNIT_OCCURENCES, "The number of code page modifications.");
279
280 STAM_REG(pVM, &pVM->csam.s.StatNrFlushes, STAMTYPE_COUNTER, "/CSAM/PageFlushes", STAMUNIT_OCCURENCES, "The number of CSAM page flushes.");
281 STAM_REG(pVM, &pVM->csam.s.StatNrFlushesSkipped, STAMTYPE_COUNTER, "/CSAM/PageFlushesSkipped", STAMUNIT_OCCURENCES, "The number of CSAM page flushes that were skipped.");
282 STAM_REG(pVM, &pVM->csam.s.StatNrKnownPagesHC, STAMTYPE_COUNTER, "/CSAM/HC/KnownPageRecords", STAMUNIT_OCCURENCES, "The number of known CSAM page records.");
283 STAM_REG(pVM, &pVM->csam.s.StatNrKnownPagesGC, STAMTYPE_COUNTER, "/CSAM/GC/KnownPageRecords", STAMUNIT_OCCURENCES, "The number of known CSAM page records.");
284 STAM_REG(pVM, &pVM->csam.s.StatNrInstr, STAMTYPE_COUNTER, "/CSAM/ScannedInstr", STAMUNIT_OCCURENCES, "The number of scanned instructions.");
285 STAM_REG(pVM, &pVM->csam.s.StatNrBytesRead, STAMTYPE_COUNTER, "/CSAM/BytesRead", STAMUNIT_OCCURENCES, "The number of bytes read for scanning.");
286 STAM_REG(pVM, &pVM->csam.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/CSAM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
287
288 STAM_REG(pVM, &pVM->csam.s.StatBitmapAlloc, STAMTYPE_COUNTER, "/CSAM/Alloc/PageBitmap", STAMUNIT_OCCURENCES, "The number of page bitmap allocations.");
289
290 STAM_REG(pVM, &pVM->csam.s.StatInstrCacheHit, STAMTYPE_COUNTER, "/CSAM/Cache/Hit", STAMUNIT_OCCURENCES, "The number of dangerous instruction cache hits.");
291 STAM_REG(pVM, &pVM->csam.s.StatInstrCacheMiss, STAMTYPE_COUNTER, "/CSAM/Cache/Miss", STAMUNIT_OCCURENCES, "The number of dangerous instruction cache misses.");
292
293 STAM_REG(pVM, &pVM->csam.s.StatScanNextFunction, STAMTYPE_COUNTER, "/CSAM/Function/Scan/Success", STAMUNIT_OCCURENCES, "The number of found functions beyond the ret border.");
294 STAM_REG(pVM, &pVM->csam.s.StatScanNextFunctionFailed, STAMTYPE_COUNTER, "/CSAM/Function/Scan/Failed", STAMUNIT_OCCURENCES, "The number of refused functions beyond the ret border.");
295
296 STAM_REG(pVM, &pVM->csam.s.StatTime, STAMTYPE_PROFILE, "/PROF/CSAM/Scan", STAMUNIT_TICKS_PER_CALL, "Scanning overhead.");
297 STAM_REG(pVM, &pVM->csam.s.StatTimeCheckAddr, STAMTYPE_PROFILE, "/PROF/CSAM/CheckAddr", STAMUNIT_TICKS_PER_CALL, "Address check overhead.");
298 STAM_REG(pVM, &pVM->csam.s.StatTimeAddrConv, STAMTYPE_PROFILE, "/PROF/CSAM/AddrConv", STAMUNIT_TICKS_PER_CALL, "Address conversion overhead.");
299 STAM_REG(pVM, &pVM->csam.s.StatTimeFlushPage, STAMTYPE_PROFILE, "/PROF/CSAM/FlushPage", STAMUNIT_TICKS_PER_CALL, "Page flushing overhead.");
300 STAM_REG(pVM, &pVM->csam.s.StatTimeDisasm, STAMTYPE_PROFILE, "/PROF/CSAM/Disasm", STAMUNIT_TICKS_PER_CALL, "Disassembly overhead.");
301 STAM_REG(pVM, &pVM->csam.s.StatFlushDirtyPages, STAMTYPE_PROFILE, "/PROF/CSAM/FlushDirtyPage", STAMUNIT_TICKS_PER_CALL, "Dirty page flushing overhead.");
302 STAM_REG(pVM, &pVM->csam.s.StatCheckGates, STAMTYPE_PROFILE, "/PROF/CSAM/CheckGates", STAMUNIT_TICKS_PER_CALL, "CSAMR3CheckGates overhead.");
303
304 /*
305 * Check CFGM option and enable/disable CSAM.
306 */
307 bool fEnabled;
308 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "CSAMEnabled", &fEnabled);
309 if (RT_FAILURE(rc))
310#ifdef CSAM_ENABLE
311 fEnabled = true;
312#else
313 fEnabled = false;
314#endif
315 if (fEnabled)
316 CSAMEnableScanning(pVM);
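    /* The "CSAMEnabled" key is queried from the VM's CFGM root; a sketch of how it is
     * typically overridden from the host side, assuming the usual VBoxInternal
     * extradata-to-CFGM-root mapping:
     *
     *     VBoxManage setextradata <vmname> "VBoxInternal/CSAMEnabled" 0
     *
     * When the key is absent, the CSAM_ENABLE compile-time default above is used.
     */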
317
318#ifdef VBOX_WITH_DEBUGGER
319 /*
320 * Debugger commands.
321 */
322 static bool fRegisteredCmds = false;
323 if (!fRegisteredCmds)
324 {
325 rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
326 if (RT_SUCCESS(rc))
327 fRegisteredCmds = true;
328 }
329#endif
330
331 return VINF_SUCCESS;
332}
333
334/**
335 * (Re)initializes CSAM
336 *
337 * @param pVM The VM.
338 */
339static int csamReinit(PVM pVM)
340{
341 /*
342 * Assert alignment and sizes.
343 */
344 AssertRelease(!(RT_OFFSETOF(VM, csam.s) & 31));
345 AssertRelease(sizeof(pVM->csam.s) <= sizeof(pVM->csam.padding));
346 AssertRelease(!HMIsEnabled(pVM));
347
348 /*
349 * Setup any fixed pointers and offsets.
350 */
351 pVM->csam.s.offVM = RT_OFFSETOF(VM, patm);
352
353 pVM->csam.s.fGatesChecked = false;
354 pVM->csam.s.fScanningStarted = false;
355
356 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies 1 VCPU */
357 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
358 pVM->csam.s.cDirtyPages = 0;
359 /* not necessary */
360 memset(pVM->csam.s.pvDirtyBasePage, 0, sizeof(pVM->csam.s.pvDirtyBasePage));
361 memset(pVM->csam.s.pvDirtyFaultPage, 0, sizeof(pVM->csam.s.pvDirtyFaultPage));
362
363 memset(&pVM->csam.s.aDangerousInstr, 0, sizeof(pVM->csam.s.aDangerousInstr));
364 pVM->csam.s.cDangerousInstr = 0;
365 pVM->csam.s.iDangerousInstr = 0;
366
367 memset(pVM->csam.s.pvCallInstruction, 0, sizeof(pVM->csam.s.pvCallInstruction));
368 pVM->csam.s.iCallInstruction = 0;
369
370 /** @note never mess with the pgdir bitmap here! */
371 return VINF_SUCCESS;
372}
373
374/**
375 * Applies relocations to data and code managed by this
376 * component. This function will be called at init and
377 * whenever the VMM needs to relocate itself inside the GC.
378 *
379 * CSAM will update the addresses used by the switcher.
380 *
381 * @param pVM The VM.
382 * @param offDelta Relocation delta.
383 */
384VMMR3_INT_DECL(void) CSAMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
385{
386 if (offDelta && !HMIsEnabled(pVM))
387 {
388 /* Adjust pgdir and page bitmap pointers. */
389 pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
390 pVM->csam.s.pPDHCBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC);
391
392 for(int i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
393 {
394 if (pVM->csam.s.pPDGCBitmapHC[i])
395 {
396 pVM->csam.s.pPDGCBitmapHC[i] += offDelta;
397 }
398 }
399 }
400 return;
401}
402
403/**
404 * Terminates the CSAM.
405 *
406 * Termination means cleaning up and freeing all resources;
407 * the VM itself is at this point powered off or suspended.
408 *
409 * @returns VBox status code.
410 * @param pVM Pointer to the VM.
411 */
412VMMR3_INT_DECL(int) CSAMR3Term(PVM pVM)
413{
414 if (HMIsEnabled(pVM))
415 return VINF_SUCCESS;
416
417 int rc;
418
419 rc = CSAMR3Reset(pVM);
420 AssertRC(rc);
421
422 /* @todo triggers assertion in MMHyperFree */
423#if 0
424 for(int i=0;i<CSAM_PAGEBMP_CHUNKS;i++)
425 {
426 if (pVM->csam.s.pPDBitmapHC[i])
427 MMHyperFree(pVM, pVM->csam.s.pPDBitmapHC[i]);
428 }
429#endif
430
431 return VINF_SUCCESS;
432}
433
434/**
435 * CSAM reset callback.
436 *
437 * @returns VBox status code.
438 * @param pVM The VM which is reset.
439 */
440VMMR3_INT_DECL(int) CSAMR3Reset(PVM pVM)
441{
442 if (HMIsEnabled(pVM))
443 return VINF_SUCCESS;
444
445 /* Clear page bitmaps. */
446 for (int i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
447 {
448 if (pVM->csam.s.pPDBitmapHC[i])
449 {
450 Assert((CSAM_PAGE_BITMAP_SIZE & 3) == 0);
451 ASMMemZero32(pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
452 }
453 }
454
455 /* Remove all CSAM page records. */
456 for (;;)
457 {
458 PCSAMPAGEREC pPageRec = (PCSAMPAGEREC)RTAvlPVGetBestFit(&pVM->csam.s.pPageTree, 0, true);
459 if (!pPageRec)
460 break;
461 csamRemovePageRecord(pVM, pPageRec->page.pPageGC);
462 }
463 Assert(!pVM->csam.s.pPageTree);
464
465 csamReinit(pVM);
466
467 return VINF_SUCCESS;
468}
469
470
471/**
472 * Callback function for RTAvlPVDoWithAll
473 *
474 * Counts the number of records in the tree
475 *
476 * @returns VBox status code.
477 * @param pNode Current node
478 * @param pcPatches Pointer to patch counter
479 */
480static DECLCALLBACK(int) CountRecord(PAVLPVNODECORE pNode, void *pcPatches)
481{
482 NOREF(pNode);
483 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
484 return VINF_SUCCESS;
485}
486
487/**
488 * Callback function for RTAvlPVDoWithAll
489 *
490 * Saves the state of the page record
491 *
492 * @returns VBox status code.
493 * @param pNode Current node
494 * @param pVM1 Pointer to the VM
495 */
496static DECLCALLBACK(int) SavePageState(PAVLPVNODECORE pNode, void *pVM1)
497{
498 PVM pVM = (PVM)pVM1;
499 PCSAMPAGEREC pPage = (PCSAMPAGEREC)pNode;
500 CSAMPAGEREC page = *pPage;
501 PSSMHANDLE pSSM = pVM->csam.s.savedstate.pSSM;
502 int rc;
503
504 /* Save the page record itself */
505 rc = SSMR3PutMem(pSSM, &page, sizeof(page));
506 AssertRCReturn(rc, rc);
507
508 if (page.page.pBitmap)
509 {
510 rc = SSMR3PutMem(pSSM, page.page.pBitmap, CSAM_PAGE_BITMAP_SIZE);
511 AssertRCReturn(rc, rc);
512 }
513
514 return VINF_SUCCESS;
515}
516
517/**
518 * Execute state save operation.
519 *
520 * @returns VBox status code.
521 * @param pVM Pointer to the VM.
522 * @param pSSM SSM operation handle.
523 */
524static DECLCALLBACK(int) csamr3Save(PVM pVM, PSSMHANDLE pSSM)
525{
526 CSAM csamInfo = pVM->csam.s;
527 int rc;
528
529 /*
530 * Count the number of page records in the tree (feeling lazy)
531 */
532 csamInfo.savedstate.cPageRecords = 0;
533 RTAvlPVDoWithAll(&pVM->csam.s.pPageTree, true, CountRecord, &csamInfo.savedstate.cPageRecords);
534
535 /*
536 * Save CSAM structure
537 */
538 pVM->csam.s.savedstate.pSSM = pSSM;
539 rc = SSMR3PutMem(pSSM, &csamInfo, sizeof(csamInfo));
540 AssertRCReturn(rc, rc);
541
542 /* Save pgdir bitmap */
543 rc = SSMR3PutMem(pSSM, csamInfo.pPDBitmapHC, CSAM_PGDIRBMP_CHUNKS*sizeof(RTHCPTR));
544 AssertRCReturn(rc, rc);
545
546 for (unsigned i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
547 {
548 if(csamInfo.pPDBitmapHC[i])
549 {
550 /* Save the page bitmap. */
551 rc = SSMR3PutMem(pSSM, csamInfo.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
552 AssertRCReturn(rc, rc);
553 }
554 }
555
556 /*
557 * Save page records
558 */
559 rc = RTAvlPVDoWithAll(&pVM->csam.s.pPageTree, true, SavePageState, pVM);
560 AssertRCReturn(rc, rc);
561
562 /** @note we don't restore aDangerousInstr; it will be recreated automatically. */
563 return VINF_SUCCESS;
564}
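/* Rough layout of the "CSAM" saved-state unit as written by csamr3Save and read back
 * by csamr3Load below (a summary of the code, not a normative format description):
 *   1. The CSAM instance data (restored via g_aCsamFields), with the page record
 *      count patched into savedstate.cPageRecords.
 *   2. The page directory bitmap pointer array (CSAM_PGDIRBMP_CHUNKS entries).
 *   3. One CSAM_PAGE_BITMAP_SIZE blob for each non-NULL chunk in that array.
 *   4. cPageRecords + cPatchPageRecords page records (restored via
 *      g_aCsamPageRecFields), each optionally followed by its page bitmap.
 */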
565
566
567/**
568 * Execute state load operation.
569 *
570 * @returns VBox status code.
571 * @param pVM Pointer to the VM.
572 * @param pSSM SSM operation handle.
573 * @param uVersion Data layout version.
574 * @param uPass The data pass.
575 */
576static DECLCALLBACK(int) csamr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
577{
578 int rc;
579 CSAM csamInfo;
580
581 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
582 if (uVersion != CSAM_SSM_VERSION)
583 {
584 AssertMsgFailed(("csamR3Load: Invalid version uVersion=%d!\n", uVersion));
585 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
586 }
587
588 pVM->csam.s.savedstate.pSSM = pSSM;
589
590 /*
591 * Restore CSAM structure
592 */
593 RT_ZERO(csamInfo);
594 rc = SSMR3GetStructEx(pSSM, &csamInfo, sizeof(csamInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamFields[0], NULL);
595 AssertRCReturn(rc, rc);
596
597 pVM->csam.s.fGatesChecked = csamInfo.fGatesChecked;
598 pVM->csam.s.fScanningStarted = csamInfo.fScanningStarted;
599
600 /* Restore dirty code page info. */
601 pVM->csam.s.cDirtyPages = csamInfo.cDirtyPages;
602 memcpy(pVM->csam.s.pvDirtyBasePage, csamInfo.pvDirtyBasePage, sizeof(pVM->csam.s.pvDirtyBasePage));
603 memcpy(pVM->csam.s.pvDirtyFaultPage, csamInfo.pvDirtyFaultPage, sizeof(pVM->csam.s.pvDirtyFaultPage));
604
605 /* Restore possible code page */
606 pVM->csam.s.cPossibleCodePages = csamInfo.cPossibleCodePages;
607 memcpy(pVM->csam.s.pvPossibleCodePage, csamInfo.pvPossibleCodePage, sizeof(pVM->csam.s.pvPossibleCodePage));
608
609 /* Restore pgdir bitmap (we'll change the pointers next). */
610 rc = SSMR3GetStructEx(pSSM, pVM->csam.s.pPDBitmapHC, sizeof(uint8_t *) * CSAM_PGDIRBMP_CHUNKS,
611 SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamPDBitmapArray[0], NULL);
612 AssertRCReturn(rc, rc);
613
614 /*
615 * Restore page bitmaps
616 */
617 for (unsigned i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
618 {
619 if(pVM->csam.s.pPDBitmapHC[i])
620 {
621 rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC[i]);
622 if (RT_FAILURE(rc))
623 {
624 Log(("MMHyperAlloc failed with %Rrc\n", rc));
625 return rc;
626 }
627 /* Convert to GC pointer. */
628 pVM->csam.s.pPDGCBitmapHC[i] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[i]);
629 Assert(pVM->csam.s.pPDGCBitmapHC[i]);
630
631 /* Restore the bitmap. */
632 rc = SSMR3GetMem(pSSM, pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
633 AssertRCReturn(rc, rc);
634 }
635 else
636 {
637 Assert(!pVM->csam.s.pPDGCBitmapHC[i]);
638 pVM->csam.s.pPDGCBitmapHC[i] = 0;
639 }
640 }
641
642 /*
643 * Restore page records
644 */
645 for (uint32_t i=0;i<csamInfo.savedstate.cPageRecords + csamInfo.savedstate.cPatchPageRecords;i++)
646 {
647 CSAMPAGEREC page;
648 PCSAMPAGE pPage;
649
650 RT_ZERO(page);
651 rc = SSMR3GetStructEx(pSSM, &page, sizeof(page), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamPageRecFields[0], NULL);
652 AssertRCReturn(rc, rc);
653
654 /*
655 * Recreate the page record
656 */
657 pPage = csamCreatePageRecord(pVM, page.page.pPageGC, page.page.enmTag, page.page.fCode32, page.page.fMonitorInvalidation);
658 AssertReturn(pPage, VERR_NO_MEMORY);
659
660 pPage->GCPhys = page.page.GCPhys;
661 pPage->fFlags = page.page.fFlags;
662 pPage->u64Hash = page.page.u64Hash;
663
664 if (page.page.pBitmap)
665 {
666 rc = SSMR3GetMem(pSSM, pPage->pBitmap, CSAM_PAGE_BITMAP_SIZE);
667 AssertRCReturn(rc, rc);
668 }
669 else
670 {
671 MMR3HeapFree(pPage->pBitmap);
672 pPage->pBitmap = 0;
673 }
674 }
675
676 /* Note: we don't restore aDangerousInstr; it will be recreated automatically. */
677 memset(&pVM->csam.s.aDangerousInstr, 0, sizeof(pVM->csam.s.aDangerousInstr));
678 pVM->csam.s.cDangerousInstr = 0;
679 pVM->csam.s.iDangerousInstr = 0;
680 return VINF_SUCCESS;
681}
682
683/**
684 * Convert guest context address to host context pointer
685 *
686 * @returns Byte pointer (ring-3 context) corresponding to pGCPtr on success,
687 * NULL on failure.
688 * @param pVM Pointer to the VM.
689 * @param pCacheRec Address conversion cache record
690 * @param pGCPtr Guest context pointer
691 *
692 *
693 */
694static uint8_t *csamR3GCVirtToHCVirt(PVM pVM, PCSAMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
695{
696 int rc;
697 void *pHCPtr;
698 Assert(pVM->cCpus == 1);
699 PVMCPU pVCpu = VMMGetCpu0(pVM);
700
701 STAM_PROFILE_START(&pVM->csam.s.StatTimeAddrConv, a);
702
703 pHCPtr = PATMR3GCPtrToHCPtr(pVM, pGCPtr);
704 if (pHCPtr)
705 return (uint8_t *)pHCPtr;
706
707 if (pCacheRec->pPageLocStartHC)
708 {
709 uint32_t offset = pGCPtr & PAGE_OFFSET_MASK;
710 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
711 {
712 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
713 return pCacheRec->pPageLocStartHC + offset;
714 }
715 }
716
717 /* Release previous lock if any. */
718 if (pCacheRec->Lock.pvMap)
719 {
720 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
721 pCacheRec->Lock.pvMap = NULL;
722 }
723
724 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
725 if (rc != VINF_SUCCESS)
726 {
727//// AssertMsgRC(rc, ("MMR3PhysGCVirt2HCVirtEx failed for %RRv\n", pGCPtr));
728 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
729 return NULL;
730 }
731
732 pCacheRec->pPageLocStartHC = (uint8_t*)((uintptr_t)pHCPtr & PAGE_BASE_HC_MASK);
733 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
734 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
735 return (uint8_t *)pHCPtr;
736}
737
738
739/** For csamR3ReadBytes. */
740typedef struct CSAMDISINFO
741{
742 PVM pVM;
743 uint8_t const *pbSrcInstr; /* aka pInstHC */
744} CSAMDISINFO, *PCSAMDISINFO;
745
746
747/**
748 * @callback_method_impl{FNDISREADBYTES}
749 */
750static DECLCALLBACK(int) csamR3ReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
751{
752 PCSAMDISINFO pDisInfo = (PCSAMDISINFO)pDis->pvUser;
753
754 /*
755 * We are not interested in patched instructions, so read the original opcode bytes.
756 *
757 * Note! Single-instruction patches (int3) are checked in CSAMR3AnalyseCallback.
758 *
759 * Since we're decoding one instruction at a time, we don't need to be
760 * concerned about any patched instructions following the first one. We
761 * could in fact probably skip this PATM call for offInstr != 0.
762 */
763 size_t cbRead = cbMaxRead;
764 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
765 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
766 if (RT_SUCCESS(rc))
767 {
768 if (cbRead >= cbMinRead)
769 {
770 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
771 return rc;
772 }
773
774 cbMinRead -= (uint8_t)cbRead;
775 cbMaxRead -= (uint8_t)cbRead;
776 offInstr += (uint8_t)cbRead;
777 uSrcAddr += cbRead;
778 }
779
780 /*
781 * The current byte isn't a patch instruction byte.
782 */
783 AssertPtr(pDisInfo->pbSrcInstr);
784 if ((pDis->uInstrAddr >> PAGE_SHIFT) == ((uSrcAddr + cbMaxRead - 1) >> PAGE_SHIFT))
785 {
786 memcpy(&pDis->abInstr[offInstr], &pDisInfo->pbSrcInstr[offInstr], cbMaxRead);
787 offInstr += cbMaxRead;
788 rc = VINF_SUCCESS;
789 }
790 else if ( (pDis->uInstrAddr >> PAGE_SHIFT) == ((uSrcAddr + cbMinRead - 1) >> PAGE_SHIFT)
791 || PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr) /** @todo does CSAM actually analyze patch code, or is this just a copy&paste check? */
792 )
793 {
794 memcpy(&pDis->abInstr[offInstr], &pDisInfo->pbSrcInstr[offInstr], cbMinRead);
795 offInstr += cbMinRead;
796 rc = VINF_SUCCESS;
797 }
798 else
799 {
800 /* Crossed a page boundary; pbSrcInstr is no good... */
801 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pDisInfo->pVM), &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
802 offInstr += cbMinRead;
803 }
804
805 pDis->cbCachedInstr = offInstr;
806 return rc;
807}
808
809DECLINLINE(int) csamR3DISInstr(PVM pVM, RTRCPTR InstrGC, uint8_t *InstrHC, DISCPUMODE enmCpuMode,
810 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
811{
812 CSAMDISINFO DisInfo = { pVM, InstrHC };
813#ifdef DEBUG
814 return DISInstrToStrEx(InstrGC, enmCpuMode, csamR3ReadBytes, &DisInfo, DISOPTYPE_ALL,
815 pCpu, pcbInstr, pszOutput, cbOutput);
816#else
817 /* We are interested in everything except harmless stuff */
818 if (pszOutput)
819 return DISInstrToStrEx(InstrGC, enmCpuMode, csamR3ReadBytes, &DisInfo,
820 ~(DISOPTYPE_INVALID | DISOPTYPE_HARMLESS | DISOPTYPE_RRM_MASK),
821 pCpu, pcbInstr, pszOutput, cbOutput);
822 return DISInstrEx(InstrGC, enmCpuMode, ~(DISOPTYPE_INVALID | DISOPTYPE_HARMLESS | DISOPTYPE_RRM_MASK),
823 csamR3ReadBytes, &DisInfo, pCpu, pcbInstr);
824#endif
825}
826
827/**
828 * Analyses the instructions following the cli for compliance with our cli heuristics
829 *
830 * @returns VBox status code.
831 * @param pVM Pointer to the VM.
832 * @param pCpu CPU disassembly state
833 * @param pInstrGC Guest context pointer to privileged instruction
834 * @param pCurInstrGC Guest context pointer to the current instruction
835 * @param pCacheRec GC to HC cache record
836 * @param pUserData User pointer (callback specific)
837 *
838 */
839static int CSAMR3AnalyseCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC,
840 PCSAMP2GLOOKUPREC pCacheRec, void *pUserData)
841{
842 PCSAMPAGE pPage = (PCSAMPAGE)pUserData;
843 int rc;
844 NOREF(pInstrGC);
845
846 switch (pCpu->pCurInstr->uOpcode)
847 {
848 case OP_INT:
849 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE8);
850 if (pCpu->Param1.uValue == 3)
851 {
852 //two byte int 3
853 return VINF_SUCCESS;
854 }
855 break;
856
857 /* Removing this breaks Win2k guests? */
858 case OP_IRET:
859 if (EMIsRawRing1Enabled(pVM))
860 break;
861 /* no break */
862
863 case OP_ILLUD2:
864 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
865 case OP_RETN:
866 case OP_INT3:
867 case OP_INVALID:
868 return VINF_SUCCESS;
869 }
870
871 // Check for exit points
872 switch (pCpu->pCurInstr->uOpcode)
873 {
874 /* It's not a good idea to patch pushf instructions:
875 * - increases the chance of conflicts (code jumping to the next instruction)
876 * - better to patch the cli
877 * - code that branches before the cli will likely hit an int 3
878 * - in general doesn't offer any benefits as we don't allow nested patch blocks (IF is always 1)
879 */
880 case OP_PUSHF:
881 case OP_POPF:
882 break;
883
884 case OP_CLI:
885 {
886 uint32_t cbInstrs = 0;
887 uint32_t cbCurInstr = pCpu->cbInstr;
888 bool fCode32 = pPage->fCode32;
889
890 Assert(fCode32);
891
892 PATMR3AddHint(pVM, pCurInstrGC, (fCode32) ? PATMFL_CODE32 : 0);
893
894 /* Make sure the instructions that follow the cli have not been encountered before. */
895 while (true)
896 {
897 DISCPUSTATE cpu;
898
899 if (cbInstrs + cbCurInstr >= SIZEOF_NEARJUMP32)
900 break;
901
902 if (csamIsCodeScanned(pVM, pCurInstrGC + cbCurInstr, &pPage) == true)
903 {
904 /* We've scanned the next instruction(s) already. This means we've
905 followed a branch that ended up there before -> dangerous!! */
906 PATMR3DetectConflict(pVM, pCurInstrGC, pCurInstrGC + cbCurInstr);
907 break;
908 }
909 pCurInstrGC += cbCurInstr;
910 cbInstrs += cbCurInstr;
911
912 { /* Force pCurInstrHC out of scope after we stop using it (page lock!) */
913 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
914 if (pCurInstrHC == NULL)
915 {
916 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
917 break;
918 }
919 Assert(VALID_PTR(pCurInstrHC));
920
921 rc = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
922 &cpu, &cbCurInstr, NULL, 0);
923 }
924 AssertRC(rc);
925 if (RT_FAILURE(rc))
926 break;
927 }
928 break;
929 }
930
931#ifdef VBOX_WITH_RAW_RING1
932 case OP_MOV:
933 /* mov xx, CS is a dangerous instruction as our raw ring usage leaks through. */
934 if ( EMIsRawRing1Enabled(pVM)
935 && (pCpu->Param2.fUse & DISUSE_REG_SEG)
936 && (pCpu->Param2.Base.idxSegReg == DISSELREG_CS))
937 {
938 Log(("CSAM: Patching dangerous 'mov xx, cs' instruction at %RGv with an int3\n", pCurInstrGC));
939 if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
940 {
941 rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
942 if (RT_FAILURE(rc))
943 {
944 Log(("PATMR3InstallPatch failed with %d\n", rc));
945 return VWRN_CONTINUE_ANALYSIS;
946 }
947 }
948 return VWRN_CONTINUE_ANALYSIS;
949 }
950 break;
951#endif
952
953 case OP_PUSH:
954 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
955 if (pCpu->pCurInstr->fParam1 != OP_PARM_REG_CS)
956 break;
957
958 /* no break */
959#ifndef VBOX_WITH_SAFE_STR
960 case OP_STR:
961#endif
962 case OP_LSL:
963 case OP_LAR:
964 case OP_SGDT:
965 case OP_SLDT:
966 case OP_SIDT:
967 case OP_SMSW:
968 case OP_VERW:
969 case OP_VERR:
970 case OP_CPUID:
971 case OP_IRET:
972#ifdef DEBUG
973 switch(pCpu->pCurInstr->uOpcode)
974 {
975 case OP_STR:
976 Log(("Privileged instruction at %RRv: str!!\n", pCurInstrGC));
977 break;
978 case OP_LSL:
979 Log(("Privileged instruction at %RRv: lsl!!\n", pCurInstrGC));
980 break;
981 case OP_LAR:
982 Log(("Privileged instruction at %RRv: lar!!\n", pCurInstrGC));
983 break;
984 case OP_SGDT:
985 Log(("Privileged instruction at %RRv: sgdt!!\n", pCurInstrGC));
986 break;
987 case OP_SLDT:
988 Log(("Privileged instruction at %RRv: sldt!!\n", pCurInstrGC));
989 break;
990 case OP_SIDT:
991 Log(("Privileged instruction at %RRv: sidt!!\n", pCurInstrGC));
992 break;
993 case OP_SMSW:
994 Log(("Privileged instruction at %RRv: smsw!!\n", pCurInstrGC));
995 break;
996 case OP_VERW:
997 Log(("Privileged instruction at %RRv: verw!!\n", pCurInstrGC));
998 break;
999 case OP_VERR:
1000 Log(("Privileged instruction at %RRv: verr!!\n", pCurInstrGC));
1001 break;
1002 case OP_CPUID:
1003 Log(("Privileged instruction at %RRv: cpuid!!\n", pCurInstrGC));
1004 break;
1005 case OP_PUSH:
1006 Log(("Privileged instruction at %RRv: push cs!!\n", pCurInstrGC));
1007 break;
1008 case OP_IRET:
1009 Log(("Privileged instruction at %RRv: iret!!\n", pCurInstrGC));
1010 break;
1011 }
1012#endif
1013
1014 if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
1015 {
1016 rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
1017 if (RT_FAILURE(rc))
1018 {
1019 Log(("PATMR3InstallPatch failed with %d\n", rc));
1020 return VWRN_CONTINUE_ANALYSIS;
1021 }
1022 }
1023 if (pCpu->pCurInstr->uOpcode == OP_IRET)
1024 return VINF_SUCCESS; /* Look no further in this branch. */
1025
1026 return VWRN_CONTINUE_ANALYSIS;
1027
1028 case OP_JMP:
1029 case OP_CALL:
1030 {
1031 // return or jump/call through a jump table
1032 if (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J)
1033 {
1034#ifdef DEBUG
1035 switch(pCpu->pCurInstr->uOpcode)
1036 {
1037 case OP_JMP:
1038 Log(("Control Flow instruction at %RRv: jmp!!\n", pCurInstrGC));
1039 break;
1040 case OP_CALL:
1041 Log(("Control Flow instruction at %RRv: call!!\n", pCurInstrGC));
1042 break;
1043 }
1044#endif
1045 return VWRN_CONTINUE_ANALYSIS;
1046 }
1047 return VWRN_CONTINUE_ANALYSIS;
1048 }
1049
1050 }
1051
1052 return VWRN_CONTINUE_ANALYSIS;
1053}
1054
1055#ifdef CSAM_ANALYSE_BEYOND_RET
1056/**
1057 * Wrapper for csamAnalyseCodeStream for call instructions.
1058 *
1059 * @returns VBox status code.
1060 * @param pVM Pointer to the VM.
1061 * @param pInstrGC Guest context pointer to privileged instruction
1062 * @param pCurInstrGC Guest context pointer to the current instruction
1063 * @param fCode32 16 or 32 bits code
1064 * @param pfnCSAMR3Analyse Callback for testing the disassembled instruction
1065 * @param pUserData User pointer (callback specific)
1066 *
1067 */
1068static int csamAnalyseCallCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
1069 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec)
1070{
1071 int rc;
1072 CSAMCALLEXITREC CallExitRec;
1073 PCSAMCALLEXITREC pOldCallRec;
1074 PCSAMPAGE pPage = 0;
1075 uint32_t i;
1076
1077 CallExitRec.cInstrAfterRet = 0;
1078
1079 pOldCallRec = pCacheRec->pCallExitRec;
1080 pCacheRec->pCallExitRec = &CallExitRec;
1081
1082 rc = csamAnalyseCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1083
1084 for (i=0;i<CallExitRec.cInstrAfterRet;i++)
1085 {
1086 PCSAMPAGE pPage = 0;
1087
1088 pCurInstrGC = CallExitRec.pInstrAfterRetGC[i];
1089
1090 /* Check if we've previously encountered the instruction after the ret. */
1091 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1092 {
1093 DISCPUSTATE cpu;
1094 uint32_t cbInstr;
1095 int rc2;
1096#ifdef DEBUG
1097 char szOutput[256];
1098#endif
1099 if (pPage == NULL)
1100 {
1101 /* New address; let's take a look at it. */
1102 pPage = csamCreatePageRecord(pVM, pCurInstrGC, CSAM_TAG_CSAM, fCode32);
1103 if (pPage == NULL)
1104 {
1105 rc = VERR_NO_MEMORY;
1106 goto done;
1107 }
1108 }
1109
1110 /**
1111 * Some generic requirements for recognizing an adjacent function:
1112 * - alignment fillers that consist of:
1113 * - nop
1114 * - lea genregX, [genregX (+ 0)]
1115 * - push ebp after the filler (can extend this later); aligned on at least a 4-byte boundary
1116 */
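            /* Typical filler/prologue byte patterns the loop below is meant to accept
             * (illustrative examples, not an exhaustive list):
             *     90                   nop
             *     CC                   int3
             *     8D 76 00             lea esi, [esi+0]
             *     8D B6 00 00 00 00    lea esi, [esi+0]    (32-bit displacement form)
             *     55                   push ebp
             */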
1117 for (int j = 0; j < 16; j++)
1118 {
1119 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1120 if (pCurInstrHC == NULL)
1121 {
1122 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1123 goto done;
1124 }
1125 Assert(VALID_PTR(pCurInstrHC));
1126
1127 STAM_PROFILE_START(&pVM->csam.s.StatTimeDisasm, a);
1128#ifdef DEBUG
1129 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1130 &cpu, &cbInstr, szOutput, sizeof(szOutput));
1131 if (RT_SUCCESS(rc2)) Log(("CSAM Call Analysis: %s", szOutput));
1132#else
1133 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1134 &cpu, &cbInstr, NULL, 0);
1135#endif
1136 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeDisasm, a);
1137 if (RT_FAILURE(rc2))
1138 {
1139 Log(("Disassembly failed at %RRv with %Rrc (probably page not present) -> return to caller\n", pCurInstrGC, rc2));
1140 goto done;
1141 }
1142
1143 STAM_COUNTER_ADD(&pVM->csam.s.StatNrBytesRead, cbInstr);
1144
1145 RCPTRTYPE(uint8_t *) addr = 0;
1146 PCSAMPAGE pJmpPage = NULL;
1147
1148 if (PAGE_ADDRESS(pCurInstrGC) != PAGE_ADDRESS(pCurInstrGC + cbInstr - 1))
1149 {
1150 if (!PGMGstIsPagePresent(pVM, pCurInstrGC + cbInstr - 1))
1151 {
1152 /// @todo fault in the page
1153 Log(("Page for current instruction %RRv is not present!!\n", pCurInstrGC));
1154 goto done;
1155 }
1156 //all is fine, let's continue
1157 csamR3CheckPageRecord(pVM, pCurInstrGC + cbInstr - 1);
1158 }
1159
1160 switch (cpu.pCurInstr->uOpcode)
1161 {
1162 case OP_NOP:
1163 case OP_INT3:
1164 break; /* acceptable */
1165
1166 case OP_LEA:
1167 /* Must be similar to:
1168 *
1169 * lea esi, [esi]
1170 * lea esi, [esi+0]
1171 * Any register is allowed as long as source and destination are identical.
1172 */
1173 if ( cpu.Param1.fUse != DISUSE_REG_GEN32
1174 || ( cpu.Param2.flags != DISUSE_REG_GEN32
1175 && ( !(cpu.Param2.flags & DISUSE_REG_GEN32)
1176 || !(cpu.Param2.flags & (DISUSE_DISPLACEMENT8|DISUSE_DISPLACEMENT16|DISUSE_DISPLACEMENT32))
1177 || cpu.Param2.uValue != 0
1178 )
1179 )
1180 || cpu.Param1.base.reg_gen32 != cpu.Param2.base.reg_gen32
1181 )
1182 {
1183 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1184 goto next_function;
1185 }
1186 break;
1187
1188 case OP_PUSH:
1189 {
1190 if ( (pCurInstrGC & 0x3) != 0
1191 || cpu.Param1.fUse != DISUSE_REG_GEN32
1192 || cpu.Param1.base.reg_gen32 != USE_REG_EBP
1193 )
1194 {
1195 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1196 goto next_function;
1197 }
1198
1199 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1200 {
1201 CSAMCALLEXITREC CallExitRec2;
1202 CallExitRec2.cInstrAfterRet = 0;
1203
1204 pCacheRec->pCallExitRec = &CallExitRec2;
1205
1206 /* Analyse the function. */
1207 Log(("Found new function at %RRv\n", pCurInstrGC));
1208 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunction);
1209 csamAnalyseCallCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1210 }
1211 goto next_function;
1212 }
1213
1214 case OP_SUB:
1215 {
1216 if ( (pCurInstrGC & 0x3) != 0
1217 || cpu.Param1.fUse != DISUSE_REG_GEN32
1218 || cpu.Param1.base.reg_gen32 != USE_REG_ESP
1219 )
1220 {
1221 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1222 goto next_function;
1223 }
1224
1225 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1226 {
1227 CSAMCALLEXITREC CallExitRec2;
1228 CallExitRec2.cInstrAfterRet = 0;
1229
1230 pCacheRec->pCallExitRec = &CallExitRec2;
1231
1232 /* Analyse the function. */
1233 Log(("Found new function at %RRv\n", pCurInstrGC));
1234 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunction);
1235 csamAnalyseCallCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1236 }
1237 goto next_function;
1238 }
1239
1240 default:
1241 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1242 goto next_function;
1243 }
1244 /* Mark it as scanned. */
1245 csamMarkCode(pVM, pPage, pCurInstrGC, cbInstr, true);
1246 pCurInstrGC += cbInstr;
1247 } /* for at most 16 instructions */
1248next_function:
1249 ; /* MSVC complains otherwise */
1250 }
1251 }
1252done:
1253 pCacheRec->pCallExitRec = pOldCallRec;
1254 return rc;
1255}
1256#else
1257#define csamAnalyseCallCodeStream csamAnalyseCodeStream
1258#endif
1259
1260/**
1261 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
1262 *
1263 * @returns VBox status code.
1264 * @param pVM Pointer to the VM.
1265 * @param pInstrGC Guest context pointer to privileged instruction
1266 * @param pCurInstrGC Guest context pointer to the current instruction
1267 * @param fCode32 16 or 32 bits code
1268 * @param pfnCSAMR3Analyse Callback for testing the disassembled instruction
1269 * @param pUserData User pointer (callback specific)
1270 *
1271 */
1272static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
1273 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec)
1274{
1275 DISCPUSTATE cpu;
1276 PCSAMPAGE pPage = (PCSAMPAGE)pUserData;
1277 int rc = VWRN_CONTINUE_ANALYSIS;
1278 uint32_t cbInstr;
1279 int rc2;
1280 Assert(pVM->cCpus == 1);
1281 PVMCPU pVCpu = VMMGetCpu0(pVM);
1282
1283#ifdef DEBUG
1284 char szOutput[256];
1285#endif
1286
1287 LogFlow(("csamAnalyseCodeStream: code at %RRv depth=%d\n", pCurInstrGC, pCacheRec->depth));
1288
1289 pVM->csam.s.fScanningStarted = true;
1290
1291 pCacheRec->depth++;
1292 /*
1293 * Limit the call depth. (rather arbitrary upper limit; too low and we won't detect certain
1294 * cpuid instructions in Linux kernels; too high and we waste too much time scanning code)
1295 * (512 is necessary to detect cpuid instructions in Red Hat EL4; see defect 1355)
1296 * @note We are using a lot of stack here; a couple of hundred KB when we go to the full depth (!)
1297 */
1298 if (pCacheRec->depth > 512)
1299 {
1300 LogFlow(("CSAM: maximum calldepth reached for %RRv\n", pCurInstrGC));
1301 pCacheRec->depth--;
1302 return VINF_SUCCESS; //let's not go on forever
1303 }
1304
1305 Assert(!PATMIsPatchGCAddr(pVM, pCurInstrGC));
1306 csamR3CheckPageRecord(pVM, pCurInstrGC);
1307
1308 while(rc == VWRN_CONTINUE_ANALYSIS)
1309 {
1310 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1311 {
1312 if (pPage == NULL)
1313 {
1314 /* New address; let's take a look at it. */
1315 pPage = csamCreatePageRecord(pVM, pCurInstrGC, CSAM_TAG_CSAM, fCode32);
1316 if (pPage == NULL)
1317 {
1318 rc = VERR_NO_MEMORY;
1319 goto done;
1320 }
1321 }
1322 }
1323 else
1324 {
1325 LogFlow(("Code at %RRv has been scanned before\n", pCurInstrGC));
1326 rc = VINF_SUCCESS;
1327 goto done;
1328 }
1329
1330 { /* Force pCurInstrHC out of scope after we stop using it (page lock!) */
1331 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1332 if (pCurInstrHC == NULL)
1333 {
1334 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1335 rc = VERR_PATCHING_REFUSED;
1336 goto done;
1337 }
1338 Assert(VALID_PTR(pCurInstrHC));
1339
1340 STAM_PROFILE_START(&pVM->csam.s.StatTimeDisasm, a);
1341#ifdef DEBUG
1342 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, fCode32 ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1343 &cpu, &cbInstr, szOutput, sizeof(szOutput));
1344 if (RT_SUCCESS(rc2)) Log(("CSAM Analysis: %s", szOutput));
1345#else
1346 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, fCode32 ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1347 &cpu, &cbInstr, NULL, 0);
1348#endif
1349 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeDisasm, a);
1350 }
1351 if (RT_FAILURE(rc2))
1352 {
1353 Log(("Disassembly failed at %RRv with %Rrc (probably page not present) -> return to caller\n", pCurInstrGC, rc2));
1354 rc = VINF_SUCCESS;
1355 goto done;
1356 }
1357
1358 STAM_COUNTER_ADD(&pVM->csam.s.StatNrBytesRead, cbInstr);
1359
1360 csamMarkCode(pVM, pPage, pCurInstrGC, cbInstr, true);
1361
1362 RCPTRTYPE(uint8_t *) addr = 0;
1363 PCSAMPAGE pJmpPage = NULL;
1364
1365 if (PAGE_ADDRESS(pCurInstrGC) != PAGE_ADDRESS(pCurInstrGC + cbInstr - 1))
1366 {
1367 if (!PGMGstIsPagePresent(pVCpu, pCurInstrGC + cbInstr - 1))
1368 {
1369 /// @todo fault in the page
1370 Log(("Page for current instruction %RRv is not present!!\n", pCurInstrGC));
1371 rc = VWRN_CONTINUE_ANALYSIS;
1372 goto next_please;
1373 }
1374 //all is fine, let's continue
1375 csamR3CheckPageRecord(pVM, pCurInstrGC + cbInstr - 1);
1376 }
1377 /*
1378 * If it's harmless, then don't bother checking it (the disasm tables had better be accurate!)
1379 */
1380 if ((cpu.pCurInstr->fOpType & ~DISOPTYPE_RRM_MASK) == DISOPTYPE_HARMLESS)
1381 {
1382 AssertMsg(pfnCSAMR3Analyse(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec, (void *)pPage) == VWRN_CONTINUE_ANALYSIS, ("Instruction incorrectly marked harmless?!?!?\n"));
1383 rc = VWRN_CONTINUE_ANALYSIS;
1384 goto next_please;
1385 }
1386
1387#ifdef CSAM_ANALYSE_BEYOND_RET
1388 /* Remember the address of the instruction following the ret in case the parent instruction was a call. */
1389 if ( pCacheRec->pCallExitRec
1390 && cpu.pCurInstr->uOpcode == OP_RETN
1391 && pCacheRec->pCallExitRec->cInstrAfterRet < CSAM_MAX_CALLEXIT_RET)
1392 {
1393 pCacheRec->pCallExitRec->pInstrAfterRetGC[pCacheRec->pCallExitRec->cInstrAfterRet] = pCurInstrGC + cbInstr;
1394 pCacheRec->pCallExitRec->cInstrAfterRet++;
1395 }
1396#endif
1397
1398 rc = pfnCSAMR3Analyse(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec, (void *)pPage);
1399 if (rc == VINF_SUCCESS)
1400 goto done;
1401
1402 // For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction)
1403 if ( ((cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW) && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J))
1404 || (cpu.pCurInstr->uOpcode == OP_CALL && cpu.Param1.fUse == DISUSE_DISPLACEMENT32)) /* simple indirect call (call dword ptr [address]) */
1405 {
1406 /* We need to parse 'call dword ptr [address]' type of calls to catch cpuid instructions in some recent Linux distributions (e.g. OpenSuse 10.3) */
1407 if ( cpu.pCurInstr->uOpcode == OP_CALL
1408 && cpu.Param1.fUse == DISUSE_DISPLACEMENT32)
1409 {
1410 addr = 0;
1411 PGMPhysSimpleReadGCPtr(pVCpu, &addr, (RTRCUINTPTR)cpu.Param1.uDisp.i32, sizeof(addr));
1412 }
1413 else
1414 addr = CSAMResolveBranch(&cpu, pCurInstrGC);
1415
1416 if (addr == 0)
1417 {
1418 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
1419 rc = VINF_SUCCESS;
1420 break;
1421 }
1422 Assert(!PATMIsPatchGCAddr(pVM, addr));
1423
1424 /* If the target address lies in a patch generated jump, then special action needs to be taken. */
1425 PATMR3DetectConflict(pVM, pCurInstrGC, addr);
1426
1427 /* Same page? */
1428 if (PAGE_ADDRESS(addr) != PAGE_ADDRESS(pCurInstrGC ))
1429 {
1430 if (!PGMGstIsPagePresent(pVCpu, addr))
1431 {
1432 Log(("Page for current instruction %RRv is not present!!\n", addr));
1433 rc = VWRN_CONTINUE_ANALYSIS;
1434 goto next_please;
1435 }
1436
1437 /* All is fine, let's continue. */
1438 csamR3CheckPageRecord(pVM, addr);
1439 }
1440
1441 pJmpPage = NULL;
1442 if (csamIsCodeScanned(pVM, addr, &pJmpPage) == false)
1443 {
1444 if (pJmpPage == NULL)
1445 {
1446 /* New branch target; let's take a look at it. */
1447 pJmpPage = csamCreatePageRecord(pVM, addr, CSAM_TAG_CSAM, fCode32);
1448 if (pJmpPage == NULL)
1449 {
1450 rc = VERR_NO_MEMORY;
1451 goto done;
1452 }
1453 Assert(pPage);
1454 }
1455 if (cpu.pCurInstr->uOpcode == OP_CALL)
1456 rc = csamAnalyseCallCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1457 else
1458 rc = csamAnalyseCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1459
1460 if (rc != VINF_SUCCESS) {
1461 goto done;
1462 }
1463 }
1464 if (cpu.pCurInstr->uOpcode == OP_JMP)
1465 {//unconditional jump; return to caller
1466 rc = VINF_SUCCESS;
1467 goto done;
1468 }
1469
1470 rc = VWRN_CONTINUE_ANALYSIS;
1471 } //if ((cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW) && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J))
1472#ifdef CSAM_SCAN_JUMP_TABLE
1473 else
1474 if ( cpu.pCurInstr->uOpcode == OP_JMP
1475 && (cpu.Param1.fUse & (DISUSE_DISPLACEMENT32|DISUSE_INDEX|DISUSE_SCALE)) == (DISUSE_DISPLACEMENT32|DISUSE_INDEX|DISUSE_SCALE)
1476 )
1477 {
1478 RTRCPTR pJumpTableGC = (RTRCPTR)cpu.Param1.disp32;
1479 uint8_t *pJumpTableHC;
1480 int rc2;
1481
1482 Log(("Jump through jump table\n"));
1483
1484 rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, pJumpTableGC, (PRTHCPTR)&pJumpTableHC, missing page lock);
1485 if (rc2 == VINF_SUCCESS)
1486 {
1487 for (uint32_t i=0;i<2;i++)
1488 {
1489 uint64_t fFlags;
1490
1491 addr = pJumpTableGC + cpu.Param1.scale * i;
1492 /* Same page? */
1493 if (PAGE_ADDRESS(addr) != PAGE_ADDRESS(pJumpTableGC))
1494 break;
1495
1496 addr = *(RTRCPTR *)(pJumpTableHC + cpu.Param1.scale * i);
1497
1498 rc2 = PGMGstGetPage(pVCpu, addr, &fFlags, NULL);
1499 if ( rc2 != VINF_SUCCESS
1500 || (fFlags & X86_PTE_US)
1501 || !(fFlags & X86_PTE_P)
1502 )
1503 break;
1504
1505 Log(("Jump to %RRv\n", addr));
1506
1507 pJmpPage = NULL;
1508 if (csamIsCodeScanned(pVM, addr, &pJmpPage) == false)
1509 {
1510 if (pJmpPage == NULL)
1511 {
1512 /* New branch target; let's take a look at it. */
1513 pJmpPage = csamCreatePageRecord(pVM, addr, CSAM_TAG_CSAM, fCode32);
1514 if (pJmpPage == NULL)
1515 {
1516 rc = VERR_NO_MEMORY;
1517 goto done;
1518 }
1519 Assert(pPage);
1520 }
1521 rc = csamAnalyseCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1522 if (rc != VINF_SUCCESS) {
1523 goto done;
1524 }
1525 }
1526 }
1527 }
1528 }
1529#endif
1530 if (rc != VWRN_CONTINUE_ANALYSIS) {
1531 break; //done!
1532 }
1533next_please:
1534 if (cpu.pCurInstr->uOpcode == OP_JMP)
1535 {
1536 rc = VINF_SUCCESS;
1537 goto done;
1538 }
1539 pCurInstrGC += cbInstr;
1540 }
1541done:
1542 pCacheRec->depth--;
1543 return rc;
1544}
1545
1546
1547/**
1548 * Calculates the 64-bit hash value for the current page
1549 *
1550 * @returns hash value
1551 * @param pVM Pointer to the VM.
1552 * @param pInstr Page address
1553 */
1554uint64_t csamR3CalcPageHash(PVM pVM, RTRCPTR pInstr)
1555{
1556 uint64_t hash = 0;
1557 uint32_t val[5];
1558 int rc;
1559 Assert(pVM->cCpus == 1);
1560 PVMCPU pVCpu = VMMGetCpu0(pVM);
1561
1562 Assert((pInstr & PAGE_OFFSET_MASK) == 0);
1563
1564 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[0], pInstr, sizeof(val[0]));
1565 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1566 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1567 {
1568 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1569 return ~0ULL;
1570 }
1571
1572 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[1], pInstr+1024, sizeof(val[0]));
1573 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1574 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1575 {
1576 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1577 return ~0ULL;
1578 }
1579
1580 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[2], pInstr+2048, sizeof(val[0]));
1581 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1582 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1583 {
1584 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1585 return ~0ULL;
1586 }
1587
1588 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[3], pInstr+3072, sizeof(val[0]));
1589 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1590 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1591 {
1592 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1593 return ~0ULL;
1594 }
1595
1596 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[4], pInstr+4092, sizeof(val[0]));
1597 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1598 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1599 {
1600 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1601 return ~0ULL;
1602 }
1603
1604 // don't want to get division by zero traps
1605 val[2] |= 1;
1606 val[4] |= 1;
1607
1608 hash = (uint64_t)val[0] * (uint64_t)val[1] / (uint64_t)val[2] + (val[3]%val[4]);
1609 return (hash == ~0ULL) ? hash - 1 : hash;
1610}
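/* The hash above samples five dwords spread across the page (offsets 0, 1024, 2048,
 * 3072 and 4092) and reserves ~0ULL as the "page not present" marker, which is why a
 * colliding result is nudged down by one. csamFlushPage below refreshes page.u64Hash
 * from it when a flushed page record is kept rather than removed. */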
1611
1612
1613/**
1614 * Notify CSAM of a page flush
1615 *
1616 * @returns VBox status code
1617 * @param pVM Pointer to the VM.
1618 * @param addr GC address of the page to flush
1619 * @param fRemovePage Page removal flag
1620 */
1621static int csamFlushPage(PVM pVM, RTRCPTR addr, bool fRemovePage)
1622{
1623 PCSAMPAGEREC pPageRec;
1624 int rc;
1625 RTGCPHYS GCPhys = 0;
1626 uint64_t fFlags = 0;
1627 Assert(pVM->cCpus == 1 || !CSAMIsEnabled(pVM));
1628
1629 if (!CSAMIsEnabled(pVM))
1630 return VINF_SUCCESS;
1631 Assert(!HMIsEnabled(pVM));
1632
1633 PVMCPU pVCpu = VMMGetCpu0(pVM);
1634
1635 STAM_PROFILE_START(&pVM->csam.s.StatTimeFlushPage, a);
1636
1637 addr = addr & PAGE_BASE_GC_MASK;
1638
1639 /*
1640 * Note: searching for the page in our tree first is more expensive (skipped flushes are two orders of magnitude more common)
1641 */
1642 if (pVM->csam.s.pPageTree == NULL)
1643 {
1644 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1645 return VWRN_CSAM_PAGE_NOT_FOUND;
1646 }
1647
1648 rc = PGMGstGetPage(pVCpu, addr, &fFlags, &GCPhys);
1649 /* Returned at a very early stage (no paging yet presumably). */
1650 if (rc == VERR_NOT_SUPPORTED)
1651 {
1652 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1653 return rc;
1654 }
1655
1656 if (RT_SUCCESS(rc))
1657 {
1658 if ( (fFlags & X86_PTE_US)
1659 || rc == VERR_PGM_PHYS_PAGE_RESERVED
1660 )
1661 {
1662 /* User page -> not relevant for us. */
1663 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushesSkipped, 1);
1664 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1665 return VINF_SUCCESS;
1666 }
1667 }
1668 else
1669 if (rc != VERR_PAGE_NOT_PRESENT && rc != VERR_PAGE_TABLE_NOT_PRESENT)
1670        AssertMsgFailed(("PGMGstGetPage %RRv failed with %Rrc\n", addr, rc));
1671
1672 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)addr);
1673 if (pPageRec)
1674 {
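        /* If the backing guest physical address is unchanged and the page is
           still present, nothing was remapped and the flush can be skipped. */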
1675 if ( GCPhys == pPageRec->page.GCPhys
1676 && (fFlags & X86_PTE_P))
1677 {
1678 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushesSkipped, 1);
1679 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1680 return VINF_SUCCESS;
1681 }
1682
1683        Log(("csamFlushPage: page %RRv has changed -> FLUSH (rc=%Rrc) (Phys: %RGp vs %RGp)\n", addr, rc, GCPhys, pPageRec->page.GCPhys));
1684
1685 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushes, 1);
1686
1687 if (fRemovePage)
1688 csamRemovePageRecord(pVM, addr);
1689 else
1690 {
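            /* Keep the record but reset it: mark the page as not scanned,
               refresh its flags, physical address and hash, and clear the
               scan bitmap. */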
1691 CSAMMarkPage(pVM, addr, false);
1692 pPageRec->page.GCPhys = 0;
1693 pPageRec->page.fFlags = 0;
1694 rc = PGMGstGetPage(pVCpu, addr, &pPageRec->page.fFlags, &pPageRec->page.GCPhys);
1695 if (rc == VINF_SUCCESS)
1696 pPageRec->page.u64Hash = csamR3CalcPageHash(pVM, addr);
1697
1698 if (pPageRec->page.pBitmap == NULL)
1699 {
1700 pPageRec->page.pBitmap = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, CSAM_PAGE_BITMAP_SIZE);
1701 Assert(pPageRec->page.pBitmap);
1702 if (pPageRec->page.pBitmap == NULL)
1703 return VERR_NO_MEMORY;
1704 }
1705 else
1706 memset(pPageRec->page.pBitmap, 0, CSAM_PAGE_BITMAP_SIZE);
1707 }
1708
1709
1710 /*
1711     * Inform the patch manager about the flush; it does not need to repeat the check above.
1712 */
1713 PATMR3FlushPage(pVM, addr);
1714
1715 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1716 return VINF_SUCCESS;
1717 }
1718 else
1719 {
1720 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1721 return VWRN_CSAM_PAGE_NOT_FOUND;
1722 }
1723}
1724
1725/**
1726 * Notify CSAM of a page flush
1727 *
1728 * @returns VBox status code
1729 * @param pVM Pointer to the VM.
1730 * @param addr GC address of the page to flush
1731 */
1732VMMR3_INT_DECL(int) CSAMR3FlushPage(PVM pVM, RTRCPTR addr)
1733{
1734 return csamFlushPage(pVM, addr, true /* remove page record */);
1735}
1736
1737/**
1738 * Remove a CSAM-monitored page. Use with care!
1739 *
1740 * @returns VBox status code
1741 * @param pVM Pointer to the VM.
1742 * @param addr        GC address of the page to remove
1743 */
1744VMMR3_INT_DECL(int) CSAMR3RemovePage(PVM pVM, RTRCPTR addr)
1745{
1746 PCSAMPAGEREC pPageRec;
1747 int rc;
1748
1749 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
1750
1751 addr = addr & PAGE_BASE_GC_MASK;
1752
1753 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)addr);
1754 if (pPageRec)
1755 {
1756 rc = csamRemovePageRecord(pVM, addr);
1757 if (RT_SUCCESS(rc))
1758 PATMR3FlushPage(pVM, addr);
1759 return VINF_SUCCESS;
1760 }
1761 return VWRN_CSAM_PAGE_NOT_FOUND;
1762}
1763
1764/**
1765 * Check a page record in case a page has been changed
1766 *
1767 * @returns VBox status code (VWRN_CSAM_PAGE_NOT_FOUND if no record exists for the page).
1768 * @param pVM Pointer to the VM.
1769 * @param pInstrGC GC instruction pointer
1770 */
1771int csamR3CheckPageRecord(PVM pVM, RTRCPTR pInstrGC)
1772{
1773 PCSAMPAGEREC pPageRec;
1774 uint64_t u64hash;
1775
1776 pInstrGC = pInstrGC & PAGE_BASE_GC_MASK;
1777
1778 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1779 if (pPageRec)
1780 {
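        /* Recompute the hash; if the page content changed since it was
           scanned, flush the stale information but keep the record around. */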
1781 u64hash = csamR3CalcPageHash(pVM, pInstrGC);
1782 if (u64hash != pPageRec->page.u64Hash)
1783 csamFlushPage(pVM, pInstrGC, false /* don't remove page record */);
1784 }
1785 else
1786 return VWRN_CSAM_PAGE_NOT_FOUND;
1787
1788 return VINF_SUCCESS;
1789}
1790
1791/**
1792 * Returns monitor description based on CSAM tag
1793 *
1794 * @return description string
1795 * @param enmTag Owner tag
1796 */
1797const char *csamGetMonitorDescription(CSAMTAG enmTag)
1798{
1799 if (enmTag == CSAM_TAG_PATM)
1800 return "CSAM-PATM self-modifying code monitor handler";
1801 else
1802 if (enmTag == CSAM_TAG_REM)
1803 return "CSAM-REM self-modifying code monitor handler";
1804 Assert(enmTag == CSAM_TAG_CSAM);
1805 return "CSAM self-modifying code monitor handler";
1806}
1807
1808/**
1809 * Adds page record to our lookup tree
1810 *
1811 * @returns CSAMPAGE pointer, or NULL on failure
1812 * @param pVM Pointer to the VM.
1813 * @param GCPtr Page address
1814 * @param enmTag Owner tag
1815 * @param fCode32     Set for 32-bit code, clear for 16-bit code
1816 * @param fMonitorInvalidation Monitor page invalidation flag
1817 */
1818static PCSAMPAGE csamCreatePageRecord(PVM pVM, RTRCPTR GCPtr, CSAMTAG enmTag, bool fCode32, bool fMonitorInvalidation)
1819{
1820 PCSAMPAGEREC pPage;
1821 int rc;
1822 bool ret;
1823 Assert(pVM->cCpus == 1);
1824 PVMCPU pVCpu = VMMGetCpu0(pVM);
1825
1826 Log(("New page record for %RRv\n", GCPtr & PAGE_BASE_GC_MASK));
1827
1828 pPage = (PCSAMPAGEREC)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, sizeof(CSAMPAGEREC));
1829 if (pPage == NULL)
1830 {
1831 AssertMsgFailed(("csamCreatePageRecord: Out of memory!!!!\n"));
1832 return NULL;
1833 }
1834 /* Round down to page boundary. */
1835 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1836 pPage->Core.Key = (AVLPVKEY)(uintptr_t)GCPtr;
1837 pPage->page.pPageGC = GCPtr;
1838 pPage->page.fCode32 = fCode32;
1839 pPage->page.fMonitorInvalidation = fMonitorInvalidation;
1840 pPage->page.enmTag = enmTag;
1841 pPage->page.fMonitorActive = false;
1842 pPage->page.pBitmap = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, PAGE_SIZE/sizeof(uint8_t));
1843 rc = PGMGstGetPage(pVCpu, GCPtr, &pPage->page.fFlags, &pPage->page.GCPhys);
1844 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1845
1846 pPage->page.u64Hash = csamR3CalcPageHash(pVM, GCPtr);
1847 ret = RTAvlPVInsert(&pVM->csam.s.pPageTree, &pPage->Core);
1848 Assert(ret);
1849
1850#ifdef CSAM_MONITOR_CODE_PAGES
1851 AssertRelease(!fInCSAMCodePageInvalidate);
1852
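    /* For PATM and REM tagged pages (and CSAM pages when
       CSAM_MONITOR_CSAM_CODE_PAGES is defined) a virtual write handler is
       registered and the shadow PTE is forced read-only, so the first guest
       write to the page (self-modifying code) traps into the handlers below. */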
1853 switch (enmTag)
1854 {
1855 case CSAM_TAG_PATM:
1856 case CSAM_TAG_REM:
1857#ifdef CSAM_MONITOR_CSAM_CODE_PAGES
1858 case CSAM_TAG_CSAM:
1859#endif
1860 {
1861 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtr, GCPtr + (PAGE_SIZE - 1) /* inclusive! */,
1862 (fMonitorInvalidation) ? CSAMCodePageInvalidate : 0, CSAMCodePageWriteHandler, "CSAMGCCodePageWriteHandler", 0,
1863 csamGetMonitorDescription(enmTag));
1864 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT, ("PGMR3HandlerVirtualRegisterEx %RRv failed with %Rrc\n", GCPtr, rc));
1865 if (RT_FAILURE(rc))
1866 Log(("PGMR3HandlerVirtualRegisterEx for %RRv failed with %Rrc\n", GCPtr, rc));
1867
1868 /* Could fail, because it's already monitored. Don't treat that condition as fatal. */
1869
1870 /* Prefetch it in case it's not there yet. */
1871 rc = PGMPrefetchPage(pVCpu, GCPtr);
1872 AssertRC(rc);
1873
1874 rc = PGMShwMakePageReadonly(pVCpu, GCPtr, 0 /*fFlags*/);
1875 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1876
1877 pPage->page.fMonitorActive = true;
1878 STAM_COUNTER_INC(&pVM->csam.s.StatPageMonitor);
1879 break;
1880 }
1881 default:
1882 break; /* to shut up GCC */
1883 }
1884
1885 Log(("csamCreatePageRecord %RRv GCPhys=%RGp\n", GCPtr, pPage->page.GCPhys));
1886
1887#ifdef VBOX_WITH_STATISTICS
1888 switch (enmTag)
1889 {
1890 case CSAM_TAG_CSAM:
1891 STAM_COUNTER_INC(&pVM->csam.s.StatPageCSAM);
1892 break;
1893 case CSAM_TAG_PATM:
1894 STAM_COUNTER_INC(&pVM->csam.s.StatPagePATM);
1895 break;
1896 case CSAM_TAG_REM:
1897 STAM_COUNTER_INC(&pVM->csam.s.StatPageREM);
1898 break;
1899 default:
1900 break; /* to shut up GCC */
1901 }
1902#endif
1903
1904#endif
1905
1906 STAM_COUNTER_INC(&pVM->csam.s.StatNrPages);
1907 if (fMonitorInvalidation)
1908 STAM_COUNTER_INC(&pVM->csam.s.StatNrPagesInv);
1909
1910 return &pPage->page;
1911}
1912
1913/**
1914 * Monitors a code page (if not already monitored)
1915 *
1916 * @returns VBox status code
1917 * @param pVM Pointer to the VM.
1918 * @param pPageAddrGC The page to monitor
1919 * @param enmTag Monitor tag
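 *
 * @remarks Registers a virtual write handler for the page (if none is active
 *          yet) and forces the shadow PTE read-only so the first guest write
 *          traps; pages tagged CSAM_TAG_PATM additionally get invalidation
 *          monitoring.
 *
 * @remarks Illustrative call-site sketch only (hypothetical, not taken from
 *          an actual caller):
 * @code
 *      if (CSAMIsEnabled(pVM))
 *          rc = CSAMR3MonitorPage(pVM, pInstrGC & PAGE_BASE_GC_MASK, CSAM_TAG_PATM);
 * @endcode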
1920 */
1921VMMR3DECL(int) CSAMR3MonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
1922{
1923 PCSAMPAGEREC pPageRec = NULL;
1924 int rc;
1925 bool fMonitorInvalidation;
1926 Assert(pVM->cCpus == 1);
1927 PVMCPU pVCpu = VMMGetCpu0(pVM);
1928 Assert(!HMIsEnabled(pVM));
1929
1930    /* Dirty pages must be handled before calling this function! */
1931 Assert(!pVM->csam.s.cDirtyPages);
1932
1933 if (pVM->csam.s.fScanningStarted == false)
1934 return VINF_SUCCESS; /* too early */
1935
1936 pPageAddrGC &= PAGE_BASE_GC_MASK;
1937
1938 Log(("CSAMR3MonitorPage %RRv %d\n", pPageAddrGC, enmTag));
1939
1940    /** @todo Implicit assumption that only PATM-tagged pages need invalidation monitoring. */
1941 fMonitorInvalidation = (enmTag == CSAM_TAG_PATM);
1942
1943 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
1944 if (pPageRec == NULL)
1945 {
1946 uint64_t fFlags;
1947
1948 rc = PGMGstGetPage(pVCpu, pPageAddrGC, &fFlags, NULL);
1949 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1950 if ( rc == VINF_SUCCESS
1951 && (fFlags & X86_PTE_US))
1952 {
1953 /* We don't care about user pages. */
1954 STAM_COUNTER_INC(&pVM->csam.s.StatNrUserPages);
1955 return VINF_SUCCESS;
1956 }
1957
1958 csamCreatePageRecord(pVM, pPageAddrGC, enmTag, true /* 32 bits code */, fMonitorInvalidation);
1959
1960 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
1961 Assert(pPageRec);
1962 }
1963 /** @todo reference count */
1964
1965#ifdef CSAM_MONITOR_CSAM_CODE_PAGES
1966 Assert(pPageRec->page.fMonitorActive);
1967#endif
1968
1969#ifdef CSAM_MONITOR_CODE_PAGES
1970 if (!pPageRec->page.fMonitorActive)
1971 {
1972 Log(("CSAMR3MonitorPage: activate monitoring for %RRv\n", pPageAddrGC));
1973
1974 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, pPageAddrGC, pPageAddrGC + (PAGE_SIZE - 1) /* inclusive! */,
1975 (fMonitorInvalidation) ? CSAMCodePageInvalidate : 0, CSAMCodePageWriteHandler, "CSAMGCCodePageWriteHandler", 0,
1976 csamGetMonitorDescription(enmTag));
1977 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT, ("PGMR3HandlerVirtualRegisterEx %RRv failed with %Rrc\n", pPageAddrGC, rc));
1978 if (RT_FAILURE(rc))
1979 Log(("PGMR3HandlerVirtualRegisterEx for %RRv failed with %Rrc\n", pPageAddrGC, rc));
1980
1981 /* Could fail, because it's already monitored. Don't treat that condition as fatal. */
1982
1983 /* Prefetch it in case it's not there yet. */
1984 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
1985 AssertRC(rc);
1986
1987 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
1988 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1989
1990 STAM_COUNTER_INC(&pVM->csam.s.StatPageMonitor);
1991
1992 pPageRec->page.fMonitorActive = true;
1993 pPageRec->page.fMonitorInvalidation = fMonitorInvalidation;
1994 }
1995 else
1996 if ( !pPageRec->page.fMonitorInvalidation
1997 && fMonitorInvalidation)
1998 {
1999 Assert(pPageRec->page.fMonitorActive);
2000 PGMHandlerVirtualChangeInvalidateCallback(pVM, pPageRec->page.pPageGC, CSAMCodePageInvalidate);
2001 pPageRec->page.fMonitorInvalidation = true;
2002 STAM_COUNTER_INC(&pVM->csam.s.StatNrPagesInv);
2003
2004 /* Prefetch it in case it's not there yet. */
2005 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2006 AssertRC(rc);
2007
2008 /* Make sure it's readonly. Page invalidation may have modified the attributes. */
2009 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2010 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2011 }
2012
2013#if 0 /* def VBOX_STRICT -> very annoying) */
2014 if (pPageRec->page.fMonitorActive)
2015 {
2016 uint64_t fPageShw;
2017 RTHCPHYS GCPhys;
2018 rc = PGMShwGetPage(pVCpu, pPageAddrGC, &fPageShw, &GCPhys);
2019// AssertMsg( (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
2020// || !(fPageShw & X86_PTE_RW)
2021// || (pPageRec->page.GCPhys == 0), ("Shadow page flags for %RRv (%RHp) aren't readonly (%RX64)!!\n", pPageAddrGC, GCPhys, fPageShw));
2022 }
2023#endif
2024
2025 if (pPageRec->page.GCPhys == 0)
2026 {
2027 /* Prefetch it in case it's not there yet. */
2028 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2029 AssertRC(rc);
2030 /* The page was changed behind our back. It won't be made read-only until the next SyncCR3, so force it here. */
2031 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2032 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2033 }
2034#endif /* CSAM_MONITOR_CODE_PAGES */
2035 return VINF_SUCCESS;
2036}
2037
2038/**
2039 * Unmonitors a code page
2040 *
2041 * @returns VBox status code
2042 * @param pVM Pointer to the VM.
2043 * @param pPageAddrGC The page to stop monitoring
2044 * @param enmTag Monitor tag
2045 */
2046VMMR3DECL(int) CSAMR3UnmonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
2047{
2048 Assert(!HMIsEnabled(pVM));
2049
2050 pPageAddrGC &= PAGE_BASE_GC_MASK;
2051
2052 Log(("CSAMR3UnmonitorPage %RRv %d\n", pPageAddrGC, enmTag));
2053
2054 Assert(enmTag == CSAM_TAG_REM);
2055
2056#ifdef VBOX_STRICT
2057 PCSAMPAGEREC pPageRec;
2058
2059 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
2060 Assert(pPageRec && pPageRec->page.enmTag == enmTag);
2061#endif
2062 return CSAMR3RemovePage(pVM, pPageAddrGC);
2063}
2064
2065/**
2066 * Removes a page record from our lookup tree
2067 *
2068 * @returns VBox status code
2069 * @param pVM Pointer to the VM.
2070 * @param GCPtr Page address
2071 */
2072static int csamRemovePageRecord(PVM pVM, RTRCPTR GCPtr)
2073{
2074 PCSAMPAGEREC pPageRec;
2075 Assert(pVM->cCpus == 1);
2076 PVMCPU pVCpu = VMMGetCpu0(pVM);
2077
2078 Log(("csamRemovePageRecord %RRv\n", GCPtr));
2079 pPageRec = (PCSAMPAGEREC)RTAvlPVRemove(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)GCPtr);
2080
2081 if (pPageRec)
2082 {
2083 STAM_COUNTER_INC(&pVM->csam.s.StatNrRemovedPages);
2084
2085#ifdef CSAM_MONITOR_CODE_PAGES
2086 if (pPageRec->page.fMonitorActive)
2087 {
2088            /** @todo This is expensive (CR3 reload)!  If this happens
2089             *        often, reuse the existing handler registration instead.
2090             */
2091 Assert(!fInCSAMCodePageInvalidate);
2092 STAM_COUNTER_DEC(&pVM->csam.s.StatPageMonitor);
2093 PGMHandlerVirtualDeregister(pVM, GCPtr);
2094 }
2095 if (pPageRec->page.enmTag == CSAM_TAG_PATM)
2096 {
2097 /* Make sure the recompiler flushes its cache as this page is no longer monitored. */
2098 STAM_COUNTER_INC(&pVM->csam.s.StatPageRemoveREMFlush);
2099 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
2100 }
2101#endif
2102
2103#ifdef VBOX_WITH_STATISTICS
2104 switch (pPageRec->page.enmTag)
2105 {
2106 case CSAM_TAG_CSAM:
2107 STAM_COUNTER_DEC(&pVM->csam.s.StatPageCSAM);
2108 break;
2109 case CSAM_TAG_PATM:
2110 STAM_COUNTER_DEC(&pVM->csam.s.StatPagePATM);
2111 break;
2112 case CSAM_TAG_REM:
2113 STAM_COUNTER_DEC(&pVM->csam.s.StatPageREM);
2114 break;
2115 default:
2116 break; /* to shut up GCC */
2117 }
2118#endif
2119
2120 if (pPageRec->page.pBitmap) MMR3HeapFree(pPageRec->page.pBitmap);
2121 MMR3HeapFree(pPageRec);
2122 }
2123 else
2124 AssertFailed();
2125
2126 return VINF_SUCCESS;
2127}
2128
2129/**
2130 * Callback for delayed writes from non-EMT threads
2131 *
2132 * @param pVM Pointer to the VM.
2133 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
2134 * @param cbBuf How much it's reading/writing.
2135 */
2136static DECLCALLBACK(void) CSAMDelayedWriteHandler(PVM pVM, RTRCPTR GCPtr, size_t cbBuf)
2137{
2138 int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
2139 AssertRC(rc);
2140}
2141
2142/**
2143 * \#PF Handler callback for virtual access handler ranges.
2144 *
2145 * Important to realize that a physical page in a range can have aliases, and
2146 * for ALL and WRITE handlers these will also trigger.
2147 *
2148 * @returns VINF_SUCCESS if the handler has carried out the operation.
2149 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
2150 * @param pVM Pointer to the VM.
2151 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
2152 * @param pvPtr The HC mapping of that address.
2153 * @param pvBuf What the guest is reading/writing.
2154 * @param cbBuf How much it's reading/writing.
2155 * @param enmAccessType The access type.
2156 * @param pvUser User argument.
2157 */
2158static DECLCALLBACK(int) CSAMCodePageWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
2159{
2160 int rc;
2161
2162 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
2163 Log(("CSAMCodePageWriteHandler: write to %RGv size=%zu\n", GCPtr, cbBuf));
2164 NOREF(pvUser);
2165
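    /* A write that stays within a single page and stores bytes identical to
       what is already there cannot affect patched or scanned code; let PGM
       perform the access as usual. */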
2166 if ( PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
2167 && !memcmp(pvPtr, pvBuf, cbBuf))
2168 {
2169 Log(("CSAMCodePageWriteHandler: dummy write -> ignore\n"));
2170 return VINF_PGM_HANDLER_DO_DEFAULT;
2171 }
2172
2173 if (VM_IS_EMT(pVM))
2174 rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
2175 else
2176 {
2177 /* Queue the write instead otherwise we'll get concurrency issues. */
2178 /** @note in theory not correct to let it write the data first before disabling a patch!
2179 * (if it writes the same data as the patch jump and we replace it with obsolete opcodes)
2180 */
2181 Log(("CSAMCodePageWriteHandler: delayed write!\n"));
2182 AssertCompileSize(RTRCPTR, 4);
2183 rc = VMR3ReqCallVoidNoWait(pVM, VMCPUID_ANY, (PFNRT)CSAMDelayedWriteHandler, 3, pVM, (RTRCPTR)GCPtr, cbBuf);
2184 }
2185 AssertRC(rc);
2186
2187 return VINF_PGM_HANDLER_DO_DEFAULT;
2188}
2189
2190/**
2191 * \#PF Handler callback for invalidation of virtual access handler ranges.
2192 *
2193 * @param pVM Pointer to the VM.
2194 * @param GCPtr The virtual address the guest has changed.
2195 */
2196static DECLCALLBACK(int) CSAMCodePageInvalidate(PVM pVM, RTGCPTR GCPtr)
2197{
2198 fInCSAMCodePageInvalidate = true;
2199 LogFlow(("CSAMCodePageInvalidate %RGv\n", GCPtr));
2200 /** @todo We can't remove the page (which unregisters the virtual handler) as we are called from a DoWithAll on the virtual handler tree. Argh. */
2201 csamFlushPage(pVM, GCPtr, false /* don't remove page! */);
2202 fInCSAMCodePageInvalidate = false;
2203 return VINF_SUCCESS;
2204}
2205
2206/**
2207 * Check whether the instruction at the given address has already been scanned.
2208 *
2209 * @returns true if the instruction has already been scanned, false otherwise.
2210 * @param pVM         Pointer to the VM.
2211 * @param pInstr      Instruction pointer
2212 * @param pPage       Where the CSAM page record pointer is cached/returned (in/out)
2213 */
2214bool csamIsCodeScanned(PVM pVM, RTRCPTR pInstr, PCSAMPAGE *pPage)
2215{
2216 PCSAMPAGEREC pPageRec;
2217 uint32_t offset;
2218
2219 STAM_PROFILE_START(&pVM->csam.s.StatTimeCheckAddr, a);
2220
2221 offset = pInstr & PAGE_OFFSET_MASK;
2222 pInstr = pInstr & PAGE_BASE_GC_MASK;
2223
2224 Assert(pPage);
2225
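    /* A NULL bitmap means the whole page has been scanned (see csamMarkCode),
       so any instruction on it counts as already checked. */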
2226 if (*pPage && (*pPage)->pPageGC == pInstr)
2227 {
2228 if ((*pPage)->pBitmap == NULL || ASMBitTest((*pPage)->pBitmap, offset))
2229 {
2230 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesHC, 1);
2231 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2232 return true;
2233 }
2234 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2235 return false;
2236 }
2237
2238 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pInstr);
2239 if (pPageRec)
2240 {
2241        if (pPage) *pPage = &pPageRec->page;
2242 if (pPageRec->page.pBitmap == NULL || ASMBitTest(pPageRec->page.pBitmap, offset))
2243 {
2244 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesHC, 1);
2245 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2246 return true;
2247 }
2248 }
2249 else
2250 {
2251 if (pPage) *pPage = NULL;
2252 }
2253 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2254 return false;
2255}
2256
2257/**
2258 * Mark an instruction in a page as scanned/not scanned
2259 *
2260 * @param pVM Pointer to the VM.
2261 * @param pPage Patch structure pointer
2262 * @param pInstr Instruction pointer
2263 * @param cbInstr Instruction size
2264 * @param fScanned Mark as scanned or not
2265 */
2266static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t cbInstr, bool fScanned)
2267{
2268    LogFlow(("csamMarkCode %RRv cbInstr=%d\n", pInstr, cbInstr));
2269 CSAMMarkPage(pVM, pInstr, fScanned);
2270
2271 /** @todo should recreate empty bitmap if !fScanned */
2272 if (pPage->pBitmap == NULL)
2273 return;
2274
2275 if (fScanned)
2276 {
2277 // retn instructions can be scanned more than once
2278 if (ASMBitTest(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK) == 0)
2279 {
2280 pPage->uSize += cbInstr;
2281 STAM_COUNTER_ADD(&pVM->csam.s.StatNrInstr, 1);
2282 }
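        /* Once every byte of the page has been accounted for, the
           per-instruction bitmap is no longer needed; csamIsCodeScanned treats
           a NULL bitmap as "entire page scanned". */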
2283 if (pPage->uSize >= PAGE_SIZE)
2284 {
2285 Log(("Scanned full page (%RRv) -> free bitmap\n", pInstr & PAGE_BASE_GC_MASK));
2286 MMR3HeapFree(pPage->pBitmap);
2287 pPage->pBitmap = NULL;
2288 }
2289 else
2290 ASMBitSet(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK);
2291 }
2292 else
2293 ASMBitClear(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK);
2294}
2295
2296/**
2297 * Mark an instruction in a page as scanned/not scanned
2298 *
2299 * @returns VBox status code.
2300 * @param pVM Pointer to the VM.
2301 * @param pInstr Instruction pointer
2302 * @param cbInstr Instruction size
2303 * @param fScanned Mark as scanned or not
2304 */
2305VMMR3_INT_DECL(int) CSAMR3MarkCode(PVM pVM, RTRCPTR pInstr, uint32_t cbInstr, bool fScanned)
2306{
2307 PCSAMPAGE pPage = 0;
2308
2309 Assert(!fScanned); /* other case not implemented. */
2310 Assert(!PATMIsPatchGCAddr(pVM, pInstr));
2311 Assert(!HMIsEnabled(pVM));
2312
2313 if (csamIsCodeScanned(pVM, pInstr, &pPage) == false)
2314 {
2315 Assert(fScanned == true); /* other case should not be possible */
2316 return VINF_SUCCESS;
2317 }
2318
2319 Log(("CSAMR3MarkCode: %RRv size=%d fScanned=%d\n", pInstr, cbInstr, fScanned));
2320 csamMarkCode(pVM, pPage, pInstr, cbInstr, fScanned);
2321 return VINF_SUCCESS;
2322}
2323
2324
2325/**
2326 * Scan and analyse code
2327 *
2328 * @returns VBox status code.
2329 * @param pVM Pointer to the VM.
2330 * @param pCtxCore CPU context
2331 * @param pInstrGC Instruction pointer
2332 */
2333VMMR3_INT_DECL(int) CSAMR3CheckCodeEx(PVM pVM, PCPUMCTXCORE pCtxCore, RTRCPTR pInstrGC)
2334{
2335 Assert(!HMIsEnabled(pVM));
2336 if (EMIsRawRing0Enabled(pVM) == false || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
2337 {
2338        // Not active; nothing to do.
2339 return VINF_SUCCESS;
2340 }
2341
2342 if (CSAMIsEnabled(pVM))
2343 {
2344 /* Assuming 32 bits code for now. */
2345 Assert(CPUMGetGuestCodeBits(VMMGetCpu0(pVM)) == 32);
2346
2347 pInstrGC = SELMToFlat(pVM, DISSELREG_CS, pCtxCore, pInstrGC);
2348 return CSAMR3CheckCode(pVM, pInstrGC);
2349 }
2350 return VINF_SUCCESS;
2351}
2352
2353/**
2354 * Scan and analyse code
2355 *
2356 * @returns VBox status code.
2357 * @param pVM Pointer to the VM.
2358 * @param pInstrGC Instruction pointer (0:32 virtual address)
2359 */
2360VMMR3_INT_DECL(int) CSAMR3CheckCode(PVM pVM, RTRCPTR pInstrGC)
2361{
2362 int rc;
2363 PCSAMPAGE pPage = NULL;
2364 Assert(!HMIsEnabled(pVM));
2365
2366 if ( EMIsRawRing0Enabled(pVM) == false
2367 || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
2368 {
2369 /* Not active. */
2370 return VINF_SUCCESS;
2371 }
2372
2373 if (CSAMIsEnabled(pVM))
2374 {
2375 /* Cache record for csamR3GCVirtToHCVirt */
2376 CSAMP2GLOOKUPREC cacheRec;
2377 RT_ZERO(cacheRec);
2378
2379 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2380 rc = csamAnalyseCallCodeStream(pVM, pInstrGC, pInstrGC, true /* 32 bits code */, CSAMR3AnalyseCallback, pPage, &cacheRec);
2381 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2382 if (cacheRec.Lock.pvMap)
2383 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2384
2385 if (rc != VINF_SUCCESS)
2386 {
2387            Log(("csamAnalyseCallCodeStream failed with %d\n", rc));
2388 return rc;
2389 }
2390 }
2391 return VINF_SUCCESS;
2392}
2393
2394/**
2395 * Flush dirty code pages
2396 *
2397 * @returns VBox status code.
2398 * @param pVM Pointer to the VM.
2399 */
2400static int csamR3FlushDirtyPages(PVM pVM)
2401{
2402 Assert(pVM->cCpus == 1);
2403 PVMCPU pVCpu = VMMGetCpu0(pVM);
2404
2405 STAM_PROFILE_START(&pVM->csam.s.StatFlushDirtyPages, a);
2406
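    /* For each recorded dirty page: notify the recompiler (when REM is built
       in), re-apply write protection on the shadow page using the recorded
       fault address, and drop REM-tagged records that turn out to be user
       pages. */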
2407 for (uint32_t i=0;i<pVM->csam.s.cDirtyPages;i++)
2408 {
2409 int rc;
2410 PCSAMPAGEREC pPageRec;
2411 RTRCPTR GCPtr = pVM->csam.s.pvDirtyBasePage[i];
2412
2413 GCPtr = GCPtr & PAGE_BASE_GC_MASK;
2414
2415#ifdef VBOX_WITH_REM
2416 /* Notify the recompiler that this page has been changed. */
2417 REMR3NotifyCodePageChanged(pVM, pVCpu, GCPtr);
2418#endif
2419
2420 /* Enable write protection again. (use the fault address as it might be an alias) */
2421 rc = PGMShwMakePageReadonly(pVCpu, pVM->csam.s.pvDirtyFaultPage[i], 0 /*fFlags*/);
2422 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2423
2424 Log(("CSAMR3FlushDirtyPages: flush %RRv (modifypage rc=%Rrc)\n", pVM->csam.s.pvDirtyBasePage[i], rc));
2425
2426 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)GCPtr);
2427 if (pPageRec && pPageRec->page.enmTag == CSAM_TAG_REM)
2428 {
2429 uint64_t fFlags;
2430
2431 rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, NULL);
2432 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
2433 if ( rc == VINF_SUCCESS
2434 && (fFlags & X86_PTE_US))
2435 {
2436 /* We don't care about user pages. */
2437 csamRemovePageRecord(pVM, GCPtr);
2438 STAM_COUNTER_INC(&pVM->csam.s.StatNrUserPages);
2439 }
2440 }
2441 }
2442 pVM->csam.s.cDirtyPages = 0;
2443 STAM_PROFILE_STOP(&pVM->csam.s.StatFlushDirtyPages, a);
2444 return VINF_SUCCESS;
2445}
2446
2447/**
2448 * Flush potential new code pages
2449 *
2450 * @returns VBox status code.
2451 * @param pVM Pointer to the VM.
2452 */
2453static int csamR3FlushCodePages(PVM pVM)
2454{
2455 Assert(pVM->cCpus == 1);
2456 PVMCPU pVCpu = VMMGetCpu0(pVM);
2457
2458 for (uint32_t i=0;i<pVM->csam.s.cPossibleCodePages;i++)
2459 {
2460 RTRCPTR GCPtr = pVM->csam.s.pvPossibleCodePage[i];
2461
2462 GCPtr = GCPtr & PAGE_BASE_GC_MASK;
2463
2464 Log(("csamR3FlushCodePages: %RRv\n", GCPtr));
2465 PGMShwMakePageNotPresent(pVCpu, GCPtr, 0 /*fFlags*/);
2466 /* Resync the page to make sure instruction fetch will fault */
2467 CSAMMarkPage(pVM, GCPtr, false);
2468 }
2469 pVM->csam.s.cPossibleCodePages = 0;
2470 return VINF_SUCCESS;
2471}
2472
2473/**
2474 * Perform any pending actions
2475 *
2476 * @returns VBox status code.
2477 * @param pVM Pointer to the VM.
2478 * @param pVCpu Pointer to the VMCPU.
2479 */
2480VMMR3_INT_DECL(int) CSAMR3DoPendingAction(PVM pVM, PVMCPU pVCpu)
2481{
2482 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
2483
2484 csamR3FlushDirtyPages(pVM);
2485 csamR3FlushCodePages(pVM);
2486
2487 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
2488 return VINF_SUCCESS;
2489}
2490
2491/**
2492 * Analyse interrupt and trap gates
2493 *
2494 * @returns VBox status code.
2495 * @param pVM Pointer to the VM.
2496 * @param iGate Start gate
2497 * @param cGates Number of gates to check
2498 */
2499VMMR3_INT_DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
2500{
2501#ifdef VBOX_WITH_RAW_MODE
2502 Assert(pVM->cCpus == 1);
2503 PVMCPU pVCpu = VMMGetCpu0(pVM);
2504 uint16_t cbIDT;
2505 RTRCPTR GCPtrIDT = CPUMGetGuestIDTR(pVCpu, &cbIDT);
2506 uint32_t iGateEnd;
2507 uint32_t maxGates;
2508 VBOXIDTE aIDT[256];
2509 PVBOXIDTE pGuestIdte;
2510 int rc;
2511
2512 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
2513 if (EMIsRawRing0Enabled(pVM) == false)
2514 {
2515 /* Enabling interrupt gates only works when raw ring 0 is enabled. */
2516 //AssertFailed();
2517 return VINF_SUCCESS;
2518 }
2519
2520 /* We only check all gates once during a session */
2521 if ( !pVM->csam.s.fGatesChecked
2522 && cGates != 256)
2523 return VINF_SUCCESS; /* too early */
2524
2525    /* Once the full 256-gate check has been done, only single-gate updates are accepted. */
2526 if ( pVM->csam.s.fGatesChecked
2527 && cGates != 1)
2528 return VINF_SUCCESS; /* ignored */
2529
2530 Assert(cGates <= 256);
2531 if (!GCPtrIDT || cGates > 256)
2532 return VERR_INVALID_PARAMETER;
2533
2534 if (cGates != 1)
2535 {
2536 pVM->csam.s.fGatesChecked = true;
2537 for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
2538 {
2539 RTRCPTR pHandler = pVM->csam.s.pvCallInstruction[i];
2540
2541 if (pHandler)
2542 {
2543 PCSAMPAGE pPage = NULL;
2544 CSAMP2GLOOKUPREC cacheRec; /* Cache record for csamR3GCVirtToHCVirt. */
2545 RT_ZERO(cacheRec);
2546
2547 Log(("CSAMCheckGates: checking previous call instruction %RRv\n", pHandler));
2548 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2549 rc = csamAnalyseCodeStream(pVM, pHandler, pHandler, true, CSAMR3AnalyseCallback, pPage, &cacheRec);
2550 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2551 if (cacheRec.Lock.pvMap)
2552 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2553
2554 if (rc != VINF_SUCCESS)
2555 {
2556 Log(("CSAMCheckGates: csamAnalyseCodeStream failed with %d\n", rc));
2557 continue;
2558 }
2559 }
2560 }
2561 }
2562
2563 /* Determine valid upper boundary. */
2564 maxGates = (cbIDT+1) / sizeof(VBOXIDTE);
2565 Assert(iGate < maxGates);
2566 if (iGate > maxGates)
2567 return VERR_INVALID_PARAMETER;
2568
2569 if (iGate + cGates > maxGates)
2570 cGates = maxGates - iGate;
2571
2572 GCPtrIDT = GCPtrIDT + iGate * sizeof(VBOXIDTE);
2573 iGateEnd = iGate + cGates;
2574
2575 STAM_PROFILE_START(&pVM->csam.s.StatCheckGates, a);
2576
2577 /*
2578 * Get IDT entries.
2579 */
2580 rc = PGMPhysSimpleReadGCPtr(pVCpu, aIDT, GCPtrIDT, cGates*sizeof(VBOXIDTE));
2581 if (RT_FAILURE(rc))
2582 {
2583 AssertMsgRC(rc, ("Failed to read IDTE! rc=%Rrc\n", rc));
2584 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2585 return rc;
2586 }
2587 pGuestIdte = &aIDT[0];
2588
2589 for (/*iGate*/; iGate<iGateEnd; iGate++, pGuestIdte++)
2590 {
2591 Assert(TRPMR3GetGuestTrapHandler(pVM, iGate) == TRPM_INVALID_HANDLER);
2592
2593 if ( pGuestIdte->Gen.u1Present
2594 && (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32 || pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_INT_32)
2595 && (pGuestIdte->Gen.u2DPL == 3 || pGuestIdte->Gen.u2DPL == 0)
2596 )
2597 {
2598 RTRCPTR pHandler;
2599 PCSAMPAGE pPage = NULL;
2600 DBGFSELINFO selInfo;
2601 CSAMP2GLOOKUPREC cacheRec; /* Cache record for csamR3GCVirtToHCVirt. */
2602 RT_ZERO(cacheRec);
2603
2604 pHandler = VBOXIDTE_OFFSET(*pGuestIdte);
2605 pHandler = SELMToFlatBySel(pVM, pGuestIdte->Gen.u16SegSel, pHandler);
2606
2607 rc = SELMR3GetSelectorInfo(pVM, pVCpu, pGuestIdte->Gen.u16SegSel, &selInfo);
2608 if ( RT_FAILURE(rc)
2609 || (selInfo.fFlags & (DBGFSELINFO_FLAGS_NOT_PRESENT | DBGFSELINFO_FLAGS_INVALID))
2610 || selInfo.GCPtrBase != 0
2611 || selInfo.cbLimit != ~0U
2612 )
2613 {
2614 /* Refuse to patch a handler whose idt cs selector isn't wide open. */
2615 Log(("CSAMCheckGates: check gate %d failed due to rc %Rrc GCPtrBase=%RRv limit=%x\n", iGate, rc, selInfo.GCPtrBase, selInfo.cbLimit));
2616 continue;
2617 }
2618
2619
2620 if (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32)
2621 {
2622 Log(("CSAMCheckGates: check trap gate %d at %04X:%08X (flat %RRv)\n", iGate, pGuestIdte->Gen.u16SegSel, VBOXIDTE_OFFSET(*pGuestIdte), pHandler));
2623 }
2624 else
2625 {
2626 Log(("CSAMCheckGates: check interrupt gate %d at %04X:%08X (flat %RRv)\n", iGate, pGuestIdte->Gen.u16SegSel, VBOXIDTE_OFFSET(*pGuestIdte), pHandler));
2627 }
2628
2629 STAM_PROFILE_START(&pVM->csam.s.StatTime, b);
2630 rc = csamAnalyseCodeStream(pVM, pHandler, pHandler, true, CSAMR3AnalyseCallback, pPage, &cacheRec);
2631 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, b);
2632 if (cacheRec.Lock.pvMap)
2633 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2634
2635 if (rc != VINF_SUCCESS)
2636 {
2637 Log(("CSAMCheckGates: csamAnalyseCodeStream failed with %d\n", rc));
2638 continue;
2639 }
2640 /* OpenBSD guest specific patch test. */
2641 if (iGate >= 0x20)
2642 {
2643 PCPUMCTX pCtx;
2644 DISCPUSTATE cpu;
2645 RTGCUINTPTR32 aOpenBsdPushCSOffset[3] = {0x03, /* OpenBSD 3.7 & 3.8 */
2646 0x2B, /* OpenBSD 4.0 installation ISO */
2647 0x2F}; /* OpenBSD 4.0 after install */
2648
2649 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2650
2651 for (unsigned i=0;i<RT_ELEMENTS(aOpenBsdPushCSOffset);i++)
2652 {
2653 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pHandler - aOpenBsdPushCSOffset[i], &cpu, NULL);
2654 if ( rc == VINF_SUCCESS
2655 && cpu.pCurInstr->uOpcode == OP_PUSH
2656 && cpu.pCurInstr->fParam1 == OP_PARM_REG_CS)
2657 {
2658 rc = PATMR3InstallPatch(pVM, pHandler - aOpenBsdPushCSOffset[i], PATMFL_CODE32 | PATMFL_GUEST_SPECIFIC);
2659 if (RT_SUCCESS(rc))
2660 Log(("Installed OpenBSD interrupt handler prefix instruction (push cs) patch\n"));
2661 }
2662 }
2663 }
2664
2665 /* Trap gates and certain interrupt gates. */
2666 uint32_t fPatchFlags = PATMFL_CODE32 | PATMFL_IDTHANDLER;
2667
2668 if (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32)
2669 fPatchFlags |= PATMFL_TRAPHANDLER;
2670 else
2671 fPatchFlags |= PATMFL_INTHANDLER;
2672
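            /* Exceptions 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP),
               14 (#PF) and 17 (#AC) push an error code on the stack; tell
               PATM so the handler patch accounts for it. */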
2673 switch (iGate) {
2674 case 8:
2675 case 10:
2676 case 11:
2677 case 12:
2678 case 13:
2679 case 14:
2680 case 17:
2681 fPatchFlags |= PATMFL_TRAPHANDLER_WITH_ERRORCODE;
2682 break;
2683 default:
2684 /* No error code. */
2685 break;
2686 }
2687
2688 Log(("Installing %s gate handler for 0x%X at %RRv\n", (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32) ? "trap" : "intr", iGate, pHandler));
2689
2690 rc = PATMR3InstallPatch(pVM, pHandler, fPatchFlags);
2691 if ( RT_SUCCESS(rc)
2692 || rc == VERR_PATM_ALREADY_PATCHED)
2693 {
2694 Log(("Gate handler 0x%X is SAFE!\n", iGate));
2695
2696 RTRCPTR pNewHandlerGC = PATMR3QueryPatchGCPtr(pVM, pHandler);
2697 if (pNewHandlerGC)
2698 {
2699 rc = TRPMR3SetGuestTrapHandler(pVM, iGate, pNewHandlerGC);
2700 if (RT_FAILURE(rc))
2701 Log(("TRPMR3SetGuestTrapHandler %d failed with %Rrc\n", iGate, rc));
2702 }
2703 }
2704 }
2705 } /* for */
2706 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2707#endif /* VBOX_WITH_RAW_MODE */
2708 return VINF_SUCCESS;
2709}
2710
2711/**
2712 * Record previous call instruction addresses
2713 *
2714 * @returns VBox status code.
2715 * @param pVM Pointer to the VM.
2716 * @param GCPtrCall Call address
2717 */
2718VMMR3DECL(int) CSAMR3RecordCallAddress(PVM pVM, RTRCPTR GCPtrCall)
2719{
2720 Assert(!HMIsEnabled(pVM));
2721 for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
2722 {
2723 if (pVM->csam.s.pvCallInstruction[i] == GCPtrCall)
2724 return VINF_SUCCESS;
2725 }
2726
2727 Log(("CSAMR3RecordCallAddress %RRv\n", GCPtrCall));
2728
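    /* Store the address in a small ring buffer; once the index wraps, the
       oldest entry is overwritten.  CSAMR3CheckGates re-analyses the recorded
       call targets during its one-time full gate check. */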
2729 pVM->csam.s.pvCallInstruction[pVM->csam.s.iCallInstruction++] = GCPtrCall;
2730 if (pVM->csam.s.iCallInstruction >= RT_ELEMENTS(pVM->csam.s.pvCallInstruction))
2731 pVM->csam.s.iCallInstruction = 0;
2732
2733 return VINF_SUCCESS;
2734}
2735
2736
2737/**
2738 * Query CSAM state (enabled/disabled)
2739 *
2740 * @returns true if enabled, false otherwise.
2741 * @param pUVM The user mode VM handle.
2742 */
2743VMMR3DECL(bool) CSAMR3IsEnabled(PUVM pUVM)
2744{
2745 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2746 PVM pVM = pUVM->pVM;
2747 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2748 return CSAMIsEnabled(pVM);
2749}
2750
2751
2752/**
2753 * Enables or disables code scanning.
2754 *
2755 * @returns VBox status code.
2756 * @param pUVM The user mode VM handle.
2757 * @param fEnabled Whether to enable or disable scanning.
2758 */
2759VMMR3DECL(int) CSAMR3SetScanningEnabled(PUVM pUVM, bool fEnabled)
2760{
2761 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2762 PVM pVM = pUVM->pVM;
2763 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2764
2765 if (HMIsEnabled(pVM))
2766 {
2767 Assert(!pVM->fCSAMEnabled);
2768 return VINF_SUCCESS;
2769 }
2770
2771 int rc;
2772 if (fEnabled)
2773 rc = CSAMEnableScanning(pVM);
2774 else
2775 rc = CSAMDisableScanning(pVM);
2776 return rc;
2777}
2778
2779
2780#ifdef VBOX_WITH_DEBUGGER
2781
2782/**
2783 * @callback_method_impl{FNDBGCCMD, The '.csamoff' command.}
2784 */
2785static DECLCALLBACK(int) csamr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2786{
2787 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
2788 NOREF(cArgs); NOREF(paArgs);
2789
2790 if (HMR3IsEnabled(pUVM))
2791 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM is permanently disabled by HM.\n");
2792
2793 int rc = CSAMR3SetScanningEnabled(pUVM, false);
2794 if (RT_FAILURE(rc))
2795 return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMR3SetScanningEnabled");
2796 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM Scanning disabled\n");
2797}
2798
2799/**
2800 * @callback_method_impl{FNDBGCCMD, The '.csamon' command.}
2801 */
2802static DECLCALLBACK(int) csamr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2803{
2804 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
2805 NOREF(cArgs); NOREF(paArgs);
2806
2807 if (HMR3IsEnabled(pUVM))
2808 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM is permanently disabled by HM.\n");
2809
2810 int rc = CSAMR3SetScanningEnabled(pUVM, true);
2811 if (RT_FAILURE(rc))
2812 return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMR3SetScanningEnabled");
2813 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM Scanning enabled\n");
2814}
2815
2816#endif /* VBOX_WITH_DEBUGGER */