VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/GIMAllKvm.cpp@ 94620

Last change on this file since 94620 was 93725, checked in by vboxsync, 3 years ago

VMM: More arm64 adjustments. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.2 KB
/* $Id: GIMAllKvm.cpp 93725 2022-02-14 13:46:16Z vboxsync $ */
/** @file
 * GIM - Guest Interface Manager, KVM, All Contexts.
 */

/*
 * Copyright (C) 2015-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_GIM
#include <VBox/vmm/gim.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/pdmapi.h>
#include "GIMKvmInternal.h"
#include "GIMInternal.h"
#include <VBox/vmm/vmcc.h>

#include <VBox/dis.h>
#include <VBox/err.h>
#include <VBox/sup.h>

#include <iprt/time.h>

/**
 * Handles the KVM hypercall.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
 *          failed).
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmHypercall(PVMCPUCC pVCpu, PCPUMCTX pCtx)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    STAM_REL_COUNTER_INC(&pVM->gim.s.StatHypercalls);

    /*
     * Get the hypercall operation and arguments.
     */
    bool const fIs64BitMode = CPUMIsGuestIn64BitCodeEx(pCtx);
    uint64_t   uHyperOp     = pCtx->rax;
    uint64_t   uHyperArg0   = pCtx->rbx;
    uint64_t   uHyperArg1   = pCtx->rcx;
    uint64_t   uHyperArg2   = pCtx->rdi;
    uint64_t   uHyperArg3   = pCtx->rsi;
    uint64_t   uHyperRet    = KVM_HYPERCALL_RET_ENOSYS;
    uint64_t   uAndMask     = UINT64_C(0xffffffffffffffff);
    if (!fIs64BitMode)
    {
        uAndMask    = UINT64_C(0xffffffff);
        uHyperOp   &= UINT64_C(0xffffffff);
        uHyperArg0 &= UINT64_C(0xffffffff);
        uHyperArg1 &= UINT64_C(0xffffffff);
        uHyperArg2 &= UINT64_C(0xffffffff);
        uHyperArg3 &= UINT64_C(0xffffffff);
        uHyperRet  &= UINT64_C(0xffffffff);
    }

    /*
     * Verify that guest ring-0 is the one making the hypercall.
     */
    uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
    if (RT_UNLIKELY(uCpl))
    {
        pCtx->rax = KVM_HYPERCALL_RET_EPERM & uAndMask;
        return VERR_GIM_HYPERCALL_ACCESS_DENIED;
    }

    /*
     * Do the work.
     */
    int rc = VINF_SUCCESS;
    switch (uHyperOp)
    {
        case KVM_HYPERCALL_OP_KICK_CPU:
        {
            if (uHyperArg1 < pVM->cCpus)
            {
                PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, uHyperArg1);  /* ASSUMES pVCpu index == ApicId of the VCPU. */
                EMUnhaltAndWakeUp(pVM, pVCpuDst);
                uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            }
            else
            {
                /* Shouldn't ever happen! If it does, throw a guru, as otherwise it'll lead to deadlocks in the guest anyway! */
                rc = VERR_GIM_HYPERCALL_FAILED;
            }
            break;
        }

        case KVM_HYPERCALL_OP_VAPIC_POLL_IRQ:
            uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            break;

        default:
            break;
    }

    /*
     * Place the result in rax/eax.
     */
    pCtx->rax = uHyperRet & uAndMask;
    return rc;
}

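/*
 * Illustrative sketch, not part of the original file: what the guest side of
 * the hypercall above might look like.  It mirrors the register layout that
 * gimKvmHypercall() reads (operation in RAX, first two arguments in RBX and
 * RCX) and uses the Linux KVM hypercall number KVM_HC_KICK_CPU (5).  The
 * function name is hypothetical and GCC/Clang inline assembly is assumed; on
 * AMD hosts the VMCALL byte sequence gets patched to VMMCALL as described in
 * gimKvmHypercallEx() further down.
 */
#if 0 /* guest-side example only */
static inline long guestKvmKickCpu(unsigned long fFlags, unsigned long uApicId)
{
    long rcHyper;
    __asm__ __volatile__("vmcall"
                         : "=a" (rcHyper)
                         : "a" (5UL /* KVM_HC_KICK_CPU */), "b" (fFlags), "c" (uApicId)
                         : "memory");
    return rcHyper;
}
#endif
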
/**
 * Returns whether the guest has configured and enabled the use of KVM's
 * hypercall interface.
 *
 * @returns true if hypercalls are enabled, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) gimKvmAreHypercallsEnabled(PVMCPU pVCpu)
{
    NOREF(pVCpu);
    /* KVM paravirt interface doesn't have hypercall control bits (like Hyper-V does)
       that guests can control, i.e. hypercalls are always enabled. */
    return true;
}


/**
 * Returns whether the guest has configured and enabled the use of KVM's
 * paravirtualized TSC.
 *
 * @returns true if paravirt. TSC is enabled, false otherwise.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(bool) gimKvmIsParavirtTscEnabled(PVMCC pVM)
{
    uint32_t const cCpus = pVM->cCpus;
    for (uint32_t idCpu = 0; idCpu < cCpus; idCpu++)
    {
        PVMCPUCC   pVCpu      = pVM->CTX_SUFF(apCpus)[idCpu];
        PGIMKVMCPU pGimKvmCpu = &pVCpu->gim.s.u.KvmCpu;
        if (MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pGimKvmCpu->u64SystemTimeMsr))
            return true;
    }
    return false;
}

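/*
 * Illustrative sketch, not part of the original file: the per-VCPU system-time
 * structure that an enabled u64SystemTimeMsr points at in guest memory.  The
 * layout follows the Linux kvmclock ABI (struct pvclock_vcpu_time_info); the
 * type and field names here are hypothetical, and VirtualBox's own definition
 * in GIMKvmInternal.h is assumed to match this layout.
 */
#if 0 /* reference layout only */
typedef struct GUESTKVMSYSTEMTIME
{
    uint32_t u32Version;         /* Odd while the host is updating the struct. */
    uint32_t u32Padding0;
    uint64_t u64Tsc;             /* Guest TSC value when the struct was last updated. */
    uint64_t u64NanoTS;          /* Nanoseconds elapsed at that TSC value. */
    uint32_t u32TscToSystemMul;  /* TSC delta -> nanoseconds multiplier. */
    int8_t   i8TscShift;         /* Shift applied to the TSC delta before multiplying. */
    uint8_t  fFlags;             /* KVM system-time flags (e.g. TSC-stable bit 0). */
    uint8_t  abPadding[2];
} GUESTKVMSYSTEMTIME;
#endif
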
/**
 * MSR read handler for KVM.
 *
 * @returns Strict VBox status code like CPUMQueryGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_READ
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   idMsr   The MSR being read.
 * @param   pRange  The range this MSR belongs to.
 * @param   puValue Where to store the MSR value read.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
{
    NOREF(pRange);
    PVM        pVM     = pVCpu->CTX_SUFF(pVM);
    PGIMKVM    pKvm    = &pVM->gim.s.u.Kvm;
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
            *puValue = pKvmCpu->u64SystemTimeMsr;
            return VINF_SUCCESS;

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
            *puValue = pKvm->u64WallClockMsr;
            return VINF_SUCCESS;

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid RdMsr (%#x) -> #GP(0)\n", idMsr));
#endif
            LogFunc(("Unknown/invalid RdMsr (%#RX32) -> #GP(0)\n", idMsr));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}

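/*
 * Illustrative sketch, not part of the original file: the wall-clock structure
 * that u64WallClockMsr points at in guest memory and that gimR3KvmEnableWallClock()
 * fills in.  The layout follows the Linux kvmclock ABI (struct pvclock_wall_clock),
 * which is also why the write handler below only requires 4-byte alignment; the
 * type and field names here are hypothetical.
 */
#if 0 /* reference layout only */
typedef struct GUESTKVMWALLCLOCK
{
    uint32_t u32Version;    /* Odd while the host is updating the struct. */
    uint32_t u32Sec;        /* Wall-clock seconds (UNIX epoch) at the time kvmclock started counting. */
    uint32_t u32Nsec;       /* Nanosecond part of the above. */
} GUESTKVMWALLCLOCK;
#endif
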
/**
 * MSR write handler for KVM.
 *
 * @returns Strict VBox status code like CPUMSetGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_WRITE
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   idMsr       The MSR being written.
 * @param   pRange      The range this MSR belongs to.
 * @param   uRawValue   The raw value with the ignored bits not masked.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue)
{
    NOREF(pRange);
    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
        {
#ifndef IN_RING3
            RT_NOREF2(pVCpu, uRawValue);
            return VINF_CPUM_R3_MSR_WRITE;
#else
            PVMCC      pVM     = pVCpu->CTX_SUFF(pVM);
            PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;
            if (uRawValue & MSR_GIM_KVM_SYSTEM_TIME_ENABLE_BIT)
                gimR3KvmEnableSystemTime(pVM, pVCpu, uRawValue);
            else
                gimR3KvmDisableSystemTime(pVM);

            pKvmCpu->u64SystemTimeMsr = uRawValue;
            return VINF_SUCCESS;
#endif /* IN_RING3 */
        }

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
        {
#ifndef IN_RING3
            RT_NOREF2(pVCpu, uRawValue);
            return VINF_CPUM_R3_MSR_WRITE;
#else
            /* Enable the wall-clock struct. */
            RTGCPHYS GCPhysWallClock = MSR_GIM_KVM_WALL_CLOCK_GUEST_GPA(uRawValue);
            if (RT_LIKELY(RT_ALIGN_64(GCPhysWallClock, 4) == GCPhysWallClock))
            {
                PVMCC pVM = pVCpu->CTX_SUFF(pVM);
                int rc = gimR3KvmEnableWallClock(pVM, GCPhysWallClock);
                if (RT_SUCCESS(rc))
                {
                    PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
                    pKvm->u64WallClockMsr = uRawValue;
                    return VINF_SUCCESS;
                }
            }
            return VERR_CPUM_RAISE_GP_0;
#endif /* IN_RING3 */
        }

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid WrMsr (%#x,%#x`%08x) -> #GP(0)\n", idMsr,
                        uRawValue & UINT64_C(0xffffffff00000000), uRawValue & UINT64_C(0xffffffff)));
#endif
            LogFunc(("Unknown/invalid WrMsr (%#RX32,%#RX64) -> #GP(0)\n", idMsr, uRawValue));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}

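/*
 * Illustrative guest-side sketch, not part of the original file: how a guest
 * typically enables kvmclock, which ends up in the MSR cases handled by
 * gimKvmWriteMsr() above.  The MSR numbers are the Linux KVM ABI values for
 * the "new" wall-clock and system-time MSRs (0x4b564d00 / 0x4b564d01); the
 * function names are hypothetical and GCC/Clang inline assembly is assumed.
 */
#if 0 /* guest-side example only */
static inline void guestWrMsr(uint32_t idMsr, uint64_t uValue)
{
    __asm__ __volatile__("wrmsr" : : "c" (idMsr), "a" ((uint32_t)uValue), "d" ((uint32_t)(uValue >> 32)));
}

static void guestEnableKvmClock(uint64_t GCPhysSystemTime, uint64_t GCPhysWallClock)
{
    guestWrMsr(UINT32_C(0x4b564d00) /* MSR_KVM_WALL_CLOCK_NEW  */, GCPhysWallClock);      /* Must be 4-byte aligned. */
    guestWrMsr(UINT32_C(0x4b564d01) /* MSR_KVM_SYSTEM_TIME_NEW */, GCPhysSystemTime | 1); /* Bit 0 = enable. */
}
#endif
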
/**
 * Whether we need to trap \#UD exceptions in the guest.
 *
 * On AMD-V we need to trap them because paravirtualized Linux/KVM guests use
 * the Intel VMCALL instruction to make hypercalls and we need to trap and
 * optionally patch them to the AMD-V VMMCALL instruction and handle the
 * hypercall.
 *
 * I guess this was done so that guest teleportation between an AMD and an Intel
 * machine would work without any changes at the time of teleportation.
 * However, this also means we -always- need to intercept \#UD exceptions on one
 * of the two CPU models (Intel or AMD). Hyper-V solves this problem more
 * elegantly by letting the hypervisor supply an opaque hypercall page.
 *
 * For raw-mode VMs, this function will always return true. See gimR3KvmInit().
 *
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVM pVM)
{
    return pVM->gim.s.u.Kvm.fTrapXcptUD;
}

/**
 * Checks the instruction and executes the hypercall if it's a valid hypercall
 * instruction.
 *
 * This interface is used by \#UD handlers and IEM.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   uDisOpcode  The disassembler opcode.
 * @param   cbInstr     The instruction length.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmHypercallEx(PVMCPUCC pVCpu, PCPUMCTX pCtx, unsigned uDisOpcode, uint8_t cbInstr)
{
    Assert(pVCpu);
    Assert(pCtx);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * If the instruction at RIP is the Intel VMCALL instruction or
     * the AMD VMMCALL instruction, handle it as a hypercall.
     *
     * Linux/KVM guests always use the Intel VMCALL instruction but we patch
     * it to the host-native one whenever we encounter it so subsequent calls
     * will not require disassembly (when coming from HM).
     */
    if (   uDisOpcode == OP_VMCALL
        || uDisOpcode == OP_VMMCALL)
    {
        /*
         * Perform the hypercall.
         *
         * For HM, we can simply resume guest execution without performing the hypercall now and
         * do it on the next VMCALL/VMMCALL exit handler on the patched instruction.
         *
         * For raw-mode we need to do this now anyway. So we do it here regardless, with the added
         * advantage that it saves one world-switch for the HM case.
         */
        VBOXSTRICTRC rcStrict = gimKvmHypercall(pVCpu, pCtx);
        if (rcStrict == VINF_SUCCESS)
        {
            /*
             * Patch the instruction so we don't have to spend time disassembling it each time.
             * Makes sense only for HM as with raw-mode we will be getting a #UD regardless.
             */
            PVM      pVM  = pVCpu->CTX_SUFF(pVM);
            PCGIMKVM pKvm = &pVM->gim.s.u.Kvm;
            if (   uDisOpcode != pKvm->uOpcodeNative
                && cbInstr == sizeof(pKvm->abOpcodeNative) )
            {
                /** @todo r=ramshankar: we probably should be doing this in an
                 *        EMT rendezvous. */
                /** @todo Add stats for patching. */
                int rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, pKvm->abOpcodeNative, sizeof(pKvm->abOpcodeNative));
                AssertRC(rc);
            }
        }
        else
        {
            /* The KVM provider doesn't have any concept of continuing hypercalls. */
            Assert(rcStrict != VINF_GIM_HYPERCALL_CONTINUING);
#ifdef IN_RING3
            Assert(rcStrict != VINF_GIM_R3_HYPERCALL);
#endif
        }
        return rcStrict;
    }

    return VERR_GIM_INVALID_HYPERCALL_INSTR;
}

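/*
 * For reference, not part of the original file: the two hypercall instruction
 * encodings that gimKvmHypercallEx() above distinguishes and patches between.
 * Both are three bytes long, which is why a simple in-place overwrite with
 * abOpcodeNative works; the array names here are hypothetical.
 */
#if 0 /* reference encodings only */
static const uint8_t g_abVmcall[3]  = { 0x0f, 0x01, 0xc1 };  /* Intel VMCALL */
static const uint8_t g_abVmmcall[3] = { 0x0f, 0x01, 0xd9 };  /* AMD VMMCALL  */
#endif
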
/**
 * Exception handler for \#UD.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
 *          failed).
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
 * @retval  VERR_GIM_INVALID_HYPERCALL_INSTR instruction at RIP is not a valid
 *          hypercall instruction.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   pDis        Pointer to the disassembled instruction state at RIP.
 *                      Optional, can be NULL.
 * @param   pcbInstr    Where to store the instruction length of the hypercall
 *                      instruction. Optional, can be NULL.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmXcptUD(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * If we didn't ask for #UD to be trapped, bail.
     */
    if (RT_UNLIKELY(!pVM->gim.s.u.Kvm.fTrapXcptUD))
        return VERR_GIM_IPE_3;

    if (!pDis)
    {
        unsigned    cbInstr;
        DISCPUSTATE Dis;
        int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
        if (RT_SUCCESS(rc))
        {
            if (pcbInstr)
                *pcbInstr = (uint8_t)cbInstr;
            return gimKvmHypercallEx(pVCpu, pCtx, Dis.pCurInstr->uOpcode, Dis.cbInstr);
        }

        Log(("GIM: KVM: Failed to disassemble instruction at CS:RIP=%04x:%08RX64. rc=%Rrc\n", pCtx->cs.Sel, pCtx->rip, rc));
        return rc;
    }

    return gimKvmHypercallEx(pVCpu, pCtx, pDis->pCurInstr->uOpcode, pDis->cbInstr);
}