VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMAll.cpp@45276

Last change on this file since 45276 was 44528, checked in by vboxsync, 12 years ago

header (C) fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 9.7 KB
/* $Id: HMAll.cpp 44528 2013-02-04 14:27:54Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/x86.h>

/**
 * Queues a page for invalidation.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCVirt      Page to invalidate.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending. */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
#if 1
    /* For now, simply force a full TLB flush instead of tracking individual pages. */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
#else
    /* Be very careful when activating this code!  (Note: iPage, the index
       into the hm.s.TlbShootdown.aPages queue, is not defined in this
       disabled variant.) */
    if (iPage == RT_ELEMENTS(pVCpu->hm.s.TlbShootdown.aPages))
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    else
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
#endif
}

/**
 * Invalidates a guest page
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCVirt      Page to invalidate
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.vmx.fSupported)
        return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);

    Assert(pVM->hm.s.svm.fSupported);
    return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);

#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}
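
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * caller that just changed a page mapping might use HMInvalidatePage().
 * Kept under #if 0 since the trigger and the helper name are assumptions.
 */
#if 0
static int exampleInvalidateAfterPteWrite(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    /* In ring-0 this dispatches to VMXR0InvalidatePage()/SVMR0InvalidatePage();
       elsewhere it merely queues a TLB flush for the next world switch. */
    int rc = HMInvalidatePage(pVCpu, GCPtrPage);
    AssertRC(rc);
    return rc;
}
#endif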

/**
 * Flushes the guest TLB
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTLB\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}
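
/*
 * Sketch of the consuming side (hypothetical, for illustration only): the
 * VMCPU_FF_TLB_FLUSH forced-action flag set above is checked and cleared by
 * the ring-0 world-switch code, roughly along these lines:
 */
#if 0
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
        /* ...flush the TLB for this VCPU before resuming the guest... */
    }
#endif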

#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler for when RTMpPokeCpu cannot be used.
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}

/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and wait afterwards. Would require
 *        recording who to poke and their current cWorldSwitchExits values,
 *        that's something not suitable for stack... So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}
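
/*
 * Hedged sketch of the batching idea from the @todo above: poke all target
 * CPUs first, then wait for each of them.  The HMPOKEBATCHENTRY type and the
 * hmR0PokeCpus() helper are hypothetical; only the hm.s fields read here are
 * taken from this file.
 */
#if 0
typedef struct HMPOKEBATCHENTRY
{
    PVMCPU      pVCpu;
    uint32_t    cWorldSwitchExits;   /* snapshot taken before poking */
} HMPOKEBATCHENTRY;

static void hmR0PokeCpus(HMPOKEBATCHENTRY *paEntries, uint32_t cEntries)
{
    /* Phase 1: fire off all the pokes without waiting (poking is async). */
    for (uint32_t i = 0; i < cEntries; i++)
        RTMpPokeCpu(paEntries[i].pVCpu->hm.s.idEnteredCpu);

    /* Phase 2: wait for each VCPU to leave guest mode at least once. */
    for (uint32_t i = 0; i < cEntries; i++)
        while (   ASMAtomicUoReadBool(&paEntries[i].pVCpu->hm.s.fCheckedTLBFlush)
               && paEntries[i].cWorldSwitchExits
                  == ASMAtomicUoReadU32(&paEntries[i].pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();
}
#endif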

#endif /* IN_RING0 */
#ifndef IN_RC

/**
 * Poke an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param   pVCpu               The handle of the virtual CPU to poke.
 * @param   fAccountFlushStat   Whether to account the call to
 *                              StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}


/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   GCPtr       Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCPtr)
{
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCPtr);
        else
        {
            hmQueueInvlPage(pVCpu, GCPtr);
            hmPokeCpuForTlbFlush(pVCpu, false /*fAccountFlushStat*/);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Flush the TLBs of all VCPUs
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTLB(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
        }
    }

    return VINF_SUCCESS;
}

#endif /* !IN_RC */

/**
 * Checks if nested paging is enabled
 *
 * @returns boolean
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}
/**
 * Return the shadow paging mode for nested paging/EPT
 *
 * @returns shadow paging mode
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM)
{
    Assert(HMIsNestedPagingActive(pVM));
    if (pVM->hm.s.svm.fSupported)
        return PGMMODE_NESTED;

    Assert(pVM->hm.s.vmx.fSupported);
    return PGMMODE_EPT;
}
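
/*
 * Illustration only: a caller can branch on the returned mode to tell VT-x
 * EPT apart from AMD-V nested paging.  The function below is hypothetical.
 */
#if 0
static void exampleLogShwPagingMode(PVM pVM)
{
    PGMMODE enmShwMode = HMGetShwPagingMode(pVM);
    if (enmShwMode == PGMMODE_EPT)
        Log(("Second-level paging: VT-x EPT\n"));
    else
        Log(("Second-level paging: AMD-V nested paging\n"));
}
#endif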

/**
 * Invalidates a guest page by physical address.
 *
 * NOTE: Assumes the current instruction references this physical page
 *       through a virtual address!
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

#ifdef IN_RING0
    if (pVM->hm.s.vmx.fSupported)
    {
        VMCPUID idThisCpu = VMMGetCpuId(pVM);

        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (idThisCpu == idCpu)
            {
                /** @todo r=ramshankar: Intel does not support flushing by guest physical
                 *        address either. See comment in VMXR0InvalidatePhysPage(). Fix this. */
                VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
            }
            else
            {
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
                hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
            }
        }
        return VINF_SUCCESS;
    }

    /* AMD-V doesn't support invalidation with guest physical addresses; see
       comment in SVMR0InvalidatePhysPage. */
    Assert(pVM->hm.s.svm.fSupported);
#else
    NOREF(GCPhys);
#endif

    HMFlushTLBOnAllVCpus(pVM);
    return VINF_SUCCESS;
}
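
/*
 * Sketch (hypothetical caller): code that changes what a guest-physical page
 * maps to might use HMInvalidatePhysPage().  Note the cost: as the comments
 * above explain, this currently degrades to a TLB flush on all VCPUs.
 */
#if 0
static int exampleOnPhysMappingChanged(PVM pVM, RTGCPHYS GCPhysPage)
{
    /* Harmless without nested paging (returns VINF_SUCCESS immediately). */
    return HMInvalidatePhysPage(pVM, GCPhysPage);
}
#endif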

/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}


/**
 * Return the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}
355