VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CSAMAll.cpp @ 62643

Last change on this file since 62643 was 62601, checked in by vboxsync on 2016-07-27

VMM: Unused parameters.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 11.9 KB
/* $Id: CSAMAll.cpp 62601 2016-07-27 15:46:22Z vboxsync $ */
/** @file
 * CSAM - Guest OS Code Scanning and Analysis Manager - Any Context
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_CSAM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/mm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/sup.h>
#include <VBox/param.h>
#include <iprt/avl.h>
#include "CSAMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/dbg.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>

#ifdef IN_RING0
# error "IN_RING3 & IN_RC only!"
#endif

/**
 * @callback_method_impl{FNPGMVIRTHANDLER,
 *      Access handler callback for virtual access handler ranges.}
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Log(("csamCodePageWriteHandler: write to %RGv LB %zu\n", GCPtr, cbBuf));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Assert(VMCPU_IS_EMT(pVCpu));
    RT_NOREF_PV(pvUser);
    RT_NOREF_PV(enmOrigin);

    /*
     * Check if it's a dummy write that doesn't change anything.
     */
    if (   PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
        && !memcmp(pvPtr, pvBuf, cbBuf))
    {
        Log(("csamCodePageWriteHandler: dummy write -> ignore\n"));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

#ifdef IN_RING3
    /*
     * Ring-3: Do proper handling.
     */
    int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
    AssertRC(rc);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#else
    /*
     * Raw-mode: Try to avoid needing to go to ring-3 (same as csamRCCodePageWritePfHandler).
     */
    uint32_t const cpl          = CPUMGetGuestCPL(pVCpu);
    bool const     fPatchCode   = PATMIsPatchGCAddr(pVM, CPUMGetGuestRIP(pVCpu));
    PPATMGCSTATE   pPATMGCState = PATMGetGCState(pVM);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
    Assert(pPATMGCState);
    Assert(pPATMGCState->fPIF || fPatchCode);

# ifdef VBOX_WITH_REM
    /* Flush the recompiler's translation block cache as the guest seems to be modifying instructions. */
    /** @todo a bit overkill?? */
    REMFlushTBs(pVM);
# endif

    /*
     * When patch code is executing instructions that must complete, then we
     * must *never* interrupt it.
     */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", CPUMGetGuestRIP(pVCpu)));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Log(("csamRCCodePageWriteHandler: code page write at %RGv (cpl=%d)\n", GCPtr, cpl));

    /*
     * If user code is modifying one of our monitored pages, then we can safely
     * write to it as it's no longer being used for supervisor code.
     */
    if (cpl != 3)
    {
        VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, NULL /* pRegFrame = no interpret */, (RTRCPTR)GCPtr, cbBuf);
        if (   rcStrict == VINF_PGM_HANDLER_DO_DEFAULT
            || rcStrict == VINF_SUCCESS)
            return rcStrict;
        if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rcStrict == VERR_PATCH_NOT_FOUND);
    }

    /*
     * Schedule ring-3 activity.
     * Note that GCPtr might be a different address in case of aliases. So,
     * take down both alternatives.
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages]  = (RTRCPTR)GCPtr;
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Continue with the write. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to read-only again.
     */
    Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv (%RGv)\n", GCPtr, GCPtr));
    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#endif
}

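/*
 * Editor's illustration, not part of the original file: the "dummy write"
 * short-circuit in csamCodePageWriteHandler above only skips the write when
 * the whole buffer lies within a single page and already matches the bytes at
 * the destination.  A minimal standalone version of that test, assuming a
 * 4 KiB page size, could look like the sketch below; the EX_* and ex* names
 * are made up for this sketch and do not exist in the VirtualBox sources.
 */
#if 0 /* illustrative sketch only, not compiled as part of CSAMAll.cpp */
# include <string.h>
# include <stdint.h>
# include <stdbool.h>

# define EX_PAGE_SIZE       0x1000u
# define EX_PAGE_ADDRESS(a) ((uintptr_t)(a) & ~(uintptr_t)(EX_PAGE_SIZE - 1))

/* Returns true when the write can be ignored: it stays within one page and changes nothing. */
static bool exIsDummyWrite(const void *pvDst, const void *pvSrc, size_t cbBuf)
{
    return EX_PAGE_ADDRESS(pvDst) == EX_PAGE_ADDRESS((uintptr_t)pvDst + cbBuf - 1)
        && memcmp(pvDst, pvSrc, cbBuf) == 0;
}
#endif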

/**
 * Check if this page needs to be analysed by CSAM
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   pvFault     Fault address
 */
VMM_INT_DECL(int) CSAMExecFault(PVM pVM, RTRCPTR pvFault)
{
    Assert(!HMIsEnabled(pVM));
    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;

    LogFlow(("CSAMGCExecFault: for page %08X scanned=%d\n", pvFault, CSAMIsPageScanned(pVM, pvFault)));

    if (CSAMIsPageScanned(pVM, pvFault))
    {
        // Already checked!
        STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesGC, 1);
        return VINF_SUCCESS;
    }

    STAM_COUNTER_ADD(&pVM->csam.s.StatNrTraps, 1);
    VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_SCAN_PAGE);
    return VINF_CSAM_PENDING_ACTION;
}


/**
 * Check if this page was previously scanned by CSAM
 *
 * @returns true -> scanned, false -> not scanned
 * @param   pVM         The cross context VM structure.
 * @param   pPage       GC page address
 */
VMM_INT_DECL(bool) CSAMIsPageScanned(PVM pVM, RTRCPTR pPage)
{
    int       pgdir, bit;
    uintptr_t page;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    return pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir] && ASMBitTest((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
}

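/*
 * Editor's illustration, not part of the original file: how CSAMIsPageScanned
 * indexes its bitmap.  Each 4 MB region of guest address space ("pgdir") has
 * its own lazily allocated chunk, and each 4 KB page within that region maps
 * to one bit.  The EX_* constants below are standalone stand-ins for
 * X86_PAGE_4M_SHIFT and friends, shown only to make the arithmetic concrete.
 */
#if 0 /* illustrative sketch only, not compiled as part of CSAMAll.cpp */
# include <stdint.h>
# include <assert.h>

# define EX_PAGE_4K_SHIFT       12
# define EX_PAGE_4M_SHIFT       22
# define EX_PAGE_4M_OFFSET_MASK 0x003fffffu

static void exBitmapIndexExample(void)
{
    uintptr_t const uPage  = 0xc0123000u;                                          /* some supervisor code page */
    unsigned const  iPgDir = uPage >> EX_PAGE_4M_SHIFT;                            /* 0xc0123000 >> 22 = 0x300  */
    unsigned const  iBit   = (uPage & EX_PAGE_4M_OFFSET_MASK) >> EX_PAGE_4K_SHIFT; /* 0x123000   >> 12 = 0x123  */
    assert(iPgDir == 0x300 && iBit == 0x123);                                      /* chunk 768, bit 291        */
}
#endif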


/**
 * Mark a page as scanned/not scanned
 *
 * @note    We always mark it as scanned, even if we haven't completely done so.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pPage       GC page address (not necessarily aligned)
 * @param   fScanned    Mark as scanned or not scanned
 *
 */
VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned)
{
    int       pgdir, bit;
    uintptr_t page;

#ifdef LOG_ENABLED
    if (fScanned && !CSAMIsPageScanned(pVM, (RTRCPTR)pPage))
        Log(("CSAMMarkPage %RRv\n", pPage));
#endif

    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    if (!CTXSUFF(pVM->csam.s.pPDBitmap)[pgdir])
    {
        STAM_COUNTER_INC(&pVM->csam.s.StatBitmapAlloc);
        int rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir]);
        if (RT_FAILURE(rc))
        {
            Log(("MMHyperAlloc failed with %Rrc\n", rc));
            return rc;
        }
#ifdef IN_RC
        pVM->csam.s.pPDHCBitmapGC[pgdir] = MMHyperRCToR3(pVM, (RCPTRTYPE(void*))pVM->csam.s.pPDBitmapGC[pgdir]);
        if (!pVM->csam.s.pPDHCBitmapGC[pgdir])
        {
            Log(("MMHyperHC2GC failed for %RRv\n", pVM->csam.s.pPDBitmapGC[pgdir]));
            return rc;
        }
#else
        pVM->csam.s.pPDGCBitmapHC[pgdir] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[pgdir]);
        if (!pVM->csam.s.pPDGCBitmapHC[pgdir])
        {
            Log(("MMHyperHC2GC failed for %RHv\n", pVM->csam.s.pPDBitmapHC[pgdir]));
            return rc;
        }
#endif
    }
    if (fScanned)
        ASMBitSet((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
    else
        ASMBitClear((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);

    return VINF_SUCCESS;
}

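/*
 * Editor's illustration, not part of the original file: the allocate-on-first-
 * use pattern CSAMMarkPage applies to its per-4MB bitmap chunks, rewritten as
 * a standalone sketch using plain calloc instead of MMHyperAlloc.  The EX_*
 * and ex* names are hypothetical, and the 128-byte chunk size simply follows
 * from 1024 pages per 4 MB region at one bit each; the real value of
 * CSAM_PAGE_BITMAP_SIZE may differ.
 */
#if 0 /* illustrative sketch only, not compiled as part of CSAMAll.cpp */
# include <stdlib.h>
# include <stdint.h>
# include <stdbool.h>

# define EX_CHUNKS            1024        /* one chunk per 4 MB of a 32-bit address space */
# define EX_CHUNK_BITMAP_SIZE (1024 / 8)  /* 1024 pages per chunk, one bit each           */

static uint8_t *g_apbChunk[EX_CHUNKS];    /* NULL until a page in that chunk is first marked */

static bool exMarkPage(unsigned iPgDir, unsigned iBit, bool fScanned)
{
    if (!g_apbChunk[iPgDir])
    {
        g_apbChunk[iPgDir] = (uint8_t *)calloc(1, EX_CHUNK_BITMAP_SIZE); /* lazy allocation */
        if (!g_apbChunk[iPgDir])
            return false;                                                /* out of memory   */
    }
    if (fScanned)
        g_apbChunk[iPgDir][iBit / 8] |= (uint8_t)(1u << (iBit & 7));
    else
        g_apbChunk[iPgDir][iBit / 8] &= (uint8_t)~(1u << (iBit & 7));
    return true;
}
#endif
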
/**
 * Check if this page needs to be analysed by CSAM.
 *
 * This function should only be called for supervisor pages and
 * only when CSAM is enabled. Leaving these selection criteria
 * to the caller simplifies the interface (PTE passing).
 *
 * Note that the page has not yet been synced, so the TLB trick
 * (which wasn't ever active anyway) cannot be applied.
 *
 * @returns true if the page should be marked not present because
 *          CSAM needs to scan it.
 * @returns false if the page was already scanned.
 * @param   pVM         The cross context VM structure.
 * @param   GCPtr       GC pointer of page
 */
VMM_INT_DECL(bool) CSAMDoesPageNeedScanning(PVM pVM, RTRCUINTPTR GCPtr)
{
    if (!CSAMIsEnabled(pVM))
        return false;
    Assert(!HMIsEnabled(pVM));

    if (CSAMIsPageScanned(pVM, (RTRCPTR)GCPtr))
    {
        /* Already checked! */
        STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrKnownPages), 1);
        return false;
    }
    STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrPageNP), 1);
    return true;
}


/**
 * Remember a possible code page for later inspection
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPtr       GC pointer of page
 */
VMM_INT_DECL(void) CSAMMarkPossibleCodePage(PVM pVM, RTRCPTR GCPtr)
{
    Assert(!HMIsEnabled(pVM));
    if (pVM->csam.s.cPossibleCodePages < RT_ELEMENTS(pVM->csam.s.pvPossibleCodePage))
    {
        pVM->csam.s.pvPossibleCodePage[pVM->csam.s.cPossibleCodePages++] = (RTRCPTR)GCPtr;
        VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_PENDING_ACTION);
    }
    return;
}


/**
 * Turn on code scanning
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) CSAMEnableScanning(PVM pVM)
{
    AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
    pVM->fCSAMEnabled = true;
    return VINF_SUCCESS;
}

/**
 * Turn off code scanning
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) CSAMDisableScanning(PVM pVM)
{
    pVM->fCSAMEnabled = false;
    return VINF_SUCCESS;
}


/**
 * Check if we've scanned this instruction before. If true, then we can emulate
 * it instead of returning to ring 3.
 *
 * Using a simple array here as there are generally few mov crx instructions and
 * a tree lookup is likely to be more expensive (it would also have to be offset based).
 *
 * @returns boolean
 * @param   pVM         The cross context VM structure.
 * @param   GCPtr       GC pointer of the instruction
 */
VMM_INT_DECL(bool) CSAMIsKnownDangerousInstr(PVM pVM, RTRCUINTPTR GCPtr)
{
    Assert(!HMIsEnabled(pVM));

    for (uint32_t i = 0; i < pVM->csam.s.cDangerousInstr; i++)
    {
        if (pVM->csam.s.aDangerousInstr[i] == (RTRCPTR)GCPtr)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheHit);
            return true;
        }
    }
    /* Record that we're about to process it in ring 3. */
    pVM->csam.s.aDangerousInstr[pVM->csam.s.iDangerousInstr++] = (RTRCPTR)GCPtr;
    pVM->csam.s.iDangerousInstr &= CSAM_MAX_DANGR_INSTR_MASK;

    if (++pVM->csam.s.cDangerousInstr > CSAM_MAX_DANGR_INSTR)
        pVM->csam.s.cDangerousInstr = CSAM_MAX_DANGR_INSTR;

    STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheMiss);
    return false;
}
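
/*
 * Editor's illustration, not part of the original file: the fixed-size cache
 * CSAMIsKnownDangerousInstr maintains, i.e. a linear scan over a small array
 * plus a write index that wraps with a power-of-two mask so old entries are
 * simply overwritten.  Standalone sketch; the EX_* and ex* names are made up
 * and the cache size is arbitrary (the real CSAM_MAX_DANGR_INSTR may differ).
 */
#if 0 /* illustrative sketch only, not compiled as part of CSAMAll.cpp */
# include <stdint.h>
# include <stdbool.h>

# define EX_MAX_DANGR_INSTR      16
# define EX_MAX_DANGR_INSTR_MASK (EX_MAX_DANGR_INSTR - 1) /* valid because the size is a power of two */

static uint32_t g_aCachedInstrAddrs[EX_MAX_DANGR_INSTR];
static uint32_t g_cCachedInstrs;  /* number of valid entries, saturates at EX_MAX_DANGR_INSTR */
static uint32_t g_iNextWrite;     /* next slot to overwrite, wraps via the mask               */

static bool exIsKnownDangerousInstr(uint32_t GCPtrInstr)
{
    for (uint32_t i = 0; i < g_cCachedInstrs; i++)
        if (g_aCachedInstrAddrs[i] == GCPtrInstr)
            return true;                          /* hit: the instruction was seen before */

    /* Miss: remember the address so the next lookup for it hits the cache. */
    g_aCachedInstrAddrs[g_iNextWrite++] = GCPtrInstr;
    g_iNextWrite &= EX_MAX_DANGR_INSTR_MASK;
    if (g_cCachedInstrs < EX_MAX_DANGR_INSTR)
        g_cCachedInstrs++;
    return false;
}
#endif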