/* $Id: NEMR3NativeTemplate-linux.cpp.h 104840 2024-06-05 00:59:51Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 Linux backend, common bits for x86 and arm64.
 */

/*
 * Copyright (C) 2021-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

/*
 * Supply stuff missing from the kvm.h on the build box.
 */
#ifndef KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON /* since 5.4 */
# define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
#endif



/**
 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pErrInfo    Where to always return error info.
 */
static int nemR3LnxInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
{
    AssertReturn(pVM->nem.s.fdKvm != -1, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));

    /*
     * Capabilities.
     */
    static const struct
    {
        const char *pszName;
        int         iCap;
        uint32_t    offNem      : 24;
        uint32_t    cbNem       : 3;
        uint32_t    fReqNonZero : 1;
        uint32_t    uReserved   : 4;
    } s_aCaps[] =
    {
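        /*
         * Entry variants (as encoded by the macros below):
         *      CAP_ENTRY__L - capability value is only logged.
         *      CAP_ENTRY__S - value is logged and stored in the given NEM member.
         *      CAP_ENTRY_MS - like _S, but the capability must be reported as non-zero.
         *      CAP_ENTRY_ML - like _L, but the capability must be reported as non-zero.
         *      CAP_ENTRY__U - capability number we have no define for; only logged.
         */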
#define CAP_ENTRY__L(a_Define)           { #a_Define, a_Define,            UINT32_C(0x00ffffff), 0, 0, 0 }
#define CAP_ENTRY__S(a_Define, a_Member) { #a_Define, a_Define,            RT_UOFFSETOF(NEM, a_Member), RT_SIZEOFMEMB(NEM, a_Member), 0, 0 }
#define CAP_ENTRY_MS(a_Define, a_Member) { #a_Define, a_Define,            RT_UOFFSETOF(NEM, a_Member), RT_SIZEOFMEMB(NEM, a_Member), 1, 0 }
#define CAP_ENTRY__U(a_Number)           { "KVM_CAP_" #a_Number, a_Number, UINT32_C(0x00ffffff), 0, 0, 0 }
#define CAP_ENTRY_ML(a_Define)           { #a_Define, a_Define,            UINT32_C(0x00ffffff), 0, 1, 0 }

        CAP_ENTRY__L(KVM_CAP_IRQCHIP),                      /* 0 */
#ifdef VBOX_VMM_TARGET_ARMV8
        CAP_ENTRY__L(KVM_CAP_HLT),
#else
        CAP_ENTRY_ML(KVM_CAP_HLT),
#endif
        CAP_ENTRY__L(KVM_CAP_MMU_SHADOW_CACHE_CONTROL),
        CAP_ENTRY_ML(KVM_CAP_USER_MEMORY),
        CAP_ENTRY__L(KVM_CAP_SET_TSS_ADDR),
        CAP_ENTRY__U(5),
        CAP_ENTRY__L(KVM_CAP_VAPIC),
        CAP_ENTRY__L(KVM_CAP_EXT_CPUID),
        CAP_ENTRY__L(KVM_CAP_CLOCKSOURCE),
        CAP_ENTRY__L(KVM_CAP_NR_VCPUS),
        CAP_ENTRY_MS(KVM_CAP_NR_MEMSLOTS, cMaxMemSlots),    /* 10 */
        CAP_ENTRY__L(KVM_CAP_PIT),
        CAP_ENTRY__L(KVM_CAP_NOP_IO_DELAY),
        CAP_ENTRY__L(KVM_CAP_PV_MMU),
        CAP_ENTRY__L(KVM_CAP_MP_STATE),
        CAP_ENTRY__L(KVM_CAP_COALESCED_MMIO),
        CAP_ENTRY__L(KVM_CAP_SYNC_MMU),
        CAP_ENTRY__U(17),
        CAP_ENTRY__L(KVM_CAP_IOMMU),
        CAP_ENTRY__U(19),   /* Buggy KVM_CAP_JOIN_MEMORY_REGIONS? */
        CAP_ENTRY__U(20),   /* Non-working KVM_CAP_DESTROY_MEMORY_REGION? */
        CAP_ENTRY__L(KVM_CAP_DESTROY_MEMORY_REGION_WORKS), /* 21 */
        CAP_ENTRY__L(KVM_CAP_USER_NMI),
#ifdef __KVM_HAVE_GUEST_DEBUG
        CAP_ENTRY__L(KVM_CAP_SET_GUEST_DEBUG),
#endif
#ifdef __KVM_HAVE_PIT
        CAP_ENTRY__L(KVM_CAP_REINJECT_CONTROL),
#endif
        CAP_ENTRY__L(KVM_CAP_IRQ_ROUTING),
        CAP_ENTRY__L(KVM_CAP_IRQ_INJECT_STATUS),
        CAP_ENTRY__U(27),
        CAP_ENTRY__U(28),
        CAP_ENTRY__L(KVM_CAP_ASSIGN_DEV_IRQ),
        CAP_ENTRY__L(KVM_CAP_JOIN_MEMORY_REGIONS_WORKS),    /* 30 */
#ifdef __KVM_HAVE_MCE
        CAP_ENTRY__L(KVM_CAP_MCE),
#endif
        CAP_ENTRY__L(KVM_CAP_IRQFD),
#ifdef __KVM_HAVE_PIT
        CAP_ENTRY__L(KVM_CAP_PIT2),
#endif
        CAP_ENTRY__L(KVM_CAP_SET_BOOT_CPU_ID),
#ifdef __KVM_HAVE_PIT_STATE2
        CAP_ENTRY__L(KVM_CAP_PIT_STATE2),
#endif
        CAP_ENTRY__L(KVM_CAP_IOEVENTFD),
        CAP_ENTRY__L(KVM_CAP_SET_IDENTITY_MAP_ADDR),
#ifdef __KVM_HAVE_XEN_HVM
        CAP_ENTRY__L(KVM_CAP_XEN_HVM),
#endif
#ifdef VBOX_VMM_TARGET_ARMV8
        CAP_ENTRY__L(KVM_CAP_ADJUST_CLOCK),
#else
        CAP_ENTRY_ML(KVM_CAP_ADJUST_CLOCK),
#endif
        CAP_ENTRY__L(KVM_CAP_INTERNAL_ERROR_DATA),          /* 40 */
#ifdef __KVM_HAVE_VCPU_EVENTS
        CAP_ENTRY_ML(KVM_CAP_VCPU_EVENTS),
#else
        CAP_ENTRY__U(41),
#endif
        CAP_ENTRY__L(KVM_CAP_S390_PSW),
        CAP_ENTRY__L(KVM_CAP_PPC_SEGSTATE),
        CAP_ENTRY__L(KVM_CAP_HYPERV),
        CAP_ENTRY__L(KVM_CAP_HYPERV_VAPIC),
        CAP_ENTRY__L(KVM_CAP_HYPERV_SPIN),
        CAP_ENTRY__L(KVM_CAP_PCI_SEGMENT),
        CAP_ENTRY__L(KVM_CAP_PPC_PAIRED_SINGLES),
        CAP_ENTRY__L(KVM_CAP_INTR_SHADOW),
#ifdef __KVM_HAVE_DEBUGREGS
        CAP_ENTRY__L(KVM_CAP_DEBUGREGS),                    /* 50 */
#endif
#ifdef VBOX_VMM_TARGET_ARMV8
        CAP_ENTRY__L(KVM_CAP_X86_ROBUST_SINGLESTEP),
#else
        CAP_ENTRY__S(KVM_CAP_X86_ROBUST_SINGLESTEP, fRobustSingleStep),
#endif
        CAP_ENTRY__L(KVM_CAP_PPC_OSI),
        CAP_ENTRY__L(KVM_CAP_PPC_UNSET_IRQ),
        CAP_ENTRY__L(KVM_CAP_ENABLE_CAP),
#ifdef __KVM_HAVE_XSAVE
        CAP_ENTRY_ML(KVM_CAP_XSAVE),
#else
        CAP_ENTRY__U(55),
#endif
#ifdef __KVM_HAVE_XCRS
        CAP_ENTRY_ML(KVM_CAP_XCRS),
#else
        CAP_ENTRY__U(56),
#endif
        CAP_ENTRY__L(KVM_CAP_PPC_GET_PVINFO),
        CAP_ENTRY__L(KVM_CAP_PPC_IRQ_LEVEL),
        CAP_ENTRY__L(KVM_CAP_ASYNC_PF),
        CAP_ENTRY__L(KVM_CAP_TSC_CONTROL),                  /* 60 */
        CAP_ENTRY__L(KVM_CAP_GET_TSC_KHZ),
        CAP_ENTRY__L(KVM_CAP_PPC_BOOKE_SREGS),
        CAP_ENTRY__L(KVM_CAP_SPAPR_TCE),
        CAP_ENTRY__L(KVM_CAP_PPC_SMT),
        CAP_ENTRY__L(KVM_CAP_PPC_RMA),
        CAP_ENTRY__L(KVM_CAP_MAX_VCPUS),
        CAP_ENTRY__L(KVM_CAP_PPC_HIOR),
        CAP_ENTRY__L(KVM_CAP_PPC_PAPR),
        CAP_ENTRY__L(KVM_CAP_SW_TLB),
        CAP_ENTRY__L(KVM_CAP_ONE_REG),                      /* 70 */
        CAP_ENTRY__L(KVM_CAP_S390_GMAP),
        CAP_ENTRY__L(KVM_CAP_TSC_DEADLINE_TIMER),
        CAP_ENTRY__L(KVM_CAP_S390_UCONTROL),
        CAP_ENTRY__L(KVM_CAP_SYNC_REGS),
        CAP_ENTRY__L(KVM_CAP_PCI_2_3),
        CAP_ENTRY__L(KVM_CAP_KVMCLOCK_CTRL),
        CAP_ENTRY__L(KVM_CAP_SIGNAL_MSI),
        CAP_ENTRY__L(KVM_CAP_PPC_GET_SMMU_INFO),
        CAP_ENTRY__L(KVM_CAP_S390_COW),
        CAP_ENTRY__L(KVM_CAP_PPC_ALLOC_HTAB),               /* 80 */
        CAP_ENTRY__L(KVM_CAP_READONLY_MEM),
        CAP_ENTRY__L(KVM_CAP_IRQFD_RESAMPLE),
        CAP_ENTRY__L(KVM_CAP_PPC_BOOKE_WATCHDOG),
        CAP_ENTRY__L(KVM_CAP_PPC_HTAB_FD),
        CAP_ENTRY__L(KVM_CAP_S390_CSS_SUPPORT),
        CAP_ENTRY__L(KVM_CAP_PPC_EPR),
#ifdef VBOX_VMM_TARGET_ARMV8
        CAP_ENTRY_ML(KVM_CAP_ARM_PSCI),
        CAP_ENTRY_ML(KVM_CAP_ARM_SET_DEVICE_ADDR),
        CAP_ENTRY_ML(KVM_CAP_DEVICE_CTRL),
#else
        CAP_ENTRY__L(KVM_CAP_ARM_PSCI),
        CAP_ENTRY__L(KVM_CAP_ARM_SET_DEVICE_ADDR),
        CAP_ENTRY__L(KVM_CAP_DEVICE_CTRL),
#endif
        CAP_ENTRY__L(KVM_CAP_IRQ_MPIC),                     /* 90 */
        CAP_ENTRY__L(KVM_CAP_PPC_RTAS),
        CAP_ENTRY__L(KVM_CAP_IRQ_XICS),
        CAP_ENTRY__L(KVM_CAP_ARM_EL1_32BIT),
        CAP_ENTRY__L(KVM_CAP_SPAPR_MULTITCE),
        CAP_ENTRY__L(KVM_CAP_EXT_EMUL_CPUID),
        CAP_ENTRY__L(KVM_CAP_HYPERV_TIME),
        CAP_ENTRY__L(KVM_CAP_IOAPIC_POLARITY_IGNORED),
        CAP_ENTRY__L(KVM_CAP_ENABLE_CAP_VM),
        CAP_ENTRY__L(KVM_CAP_S390_IRQCHIP),
        CAP_ENTRY__L(KVM_CAP_IOEVENTFD_NO_LENGTH),          /* 100 */
        CAP_ENTRY__L(KVM_CAP_VM_ATTRIBUTES),
#ifdef VBOX_VMM_TARGET_ARMV8
        CAP_ENTRY_ML(KVM_CAP_ARM_PSCI_0_2),
#else
        CAP_ENTRY__L(KVM_CAP_ARM_PSCI_0_2),
#endif
        CAP_ENTRY__L(KVM_CAP_PPC_FIXUP_HCALL),
        CAP_ENTRY__L(KVM_CAP_PPC_ENABLE_HCALL),
        CAP_ENTRY__L(KVM_CAP_CHECK_EXTENSION_VM),
        CAP_ENTRY__L(KVM_CAP_S390_USER_SIGP),
        CAP_ENTRY__L(KVM_CAP_S390_VECTOR_REGISTERS),
        CAP_ENTRY__L(KVM_CAP_S390_MEM_OP),
        CAP_ENTRY__L(KVM_CAP_S390_USER_STSI),
        CAP_ENTRY__L(KVM_CAP_S390_SKEYS),                   /* 110 */
        CAP_ENTRY__L(KVM_CAP_MIPS_FPU),
        CAP_ENTRY__L(KVM_CAP_MIPS_MSA),
        CAP_ENTRY__L(KVM_CAP_S390_INJECT_IRQ),
        CAP_ENTRY__L(KVM_CAP_S390_IRQ_STATE),
        CAP_ENTRY__L(KVM_CAP_PPC_HWRNG),
        CAP_ENTRY__L(KVM_CAP_DISABLE_QUIRKS),
        CAP_ENTRY__L(KVM_CAP_X86_SMM),
        CAP_ENTRY__L(KVM_CAP_MULTI_ADDRESS_SPACE),
        CAP_ENTRY__L(KVM_CAP_GUEST_DEBUG_HW_BPS),
        CAP_ENTRY__L(KVM_CAP_GUEST_DEBUG_HW_WPS),           /* 120 */
        CAP_ENTRY__L(KVM_CAP_SPLIT_IRQCHIP),
        CAP_ENTRY__L(KVM_CAP_IOEVENTFD_ANY_LENGTH),
        CAP_ENTRY__L(KVM_CAP_HYPERV_SYNIC),
        CAP_ENTRY__L(KVM_CAP_S390_RI),
        CAP_ENTRY__L(KVM_CAP_SPAPR_TCE_64),
        CAP_ENTRY__L(KVM_CAP_ARM_PMU_V3),
        CAP_ENTRY__L(KVM_CAP_VCPU_ATTRIBUTES),
        CAP_ENTRY__L(KVM_CAP_MAX_VCPU_ID),
        CAP_ENTRY__L(KVM_CAP_X2APIC_API),
        CAP_ENTRY__L(KVM_CAP_S390_USER_INSTR0),             /* 130 */
        CAP_ENTRY__L(KVM_CAP_MSI_DEVID),
        CAP_ENTRY__L(KVM_CAP_PPC_HTM),
        CAP_ENTRY__L(KVM_CAP_SPAPR_RESIZE_HPT),
        CAP_ENTRY__L(KVM_CAP_PPC_MMU_RADIX),
        CAP_ENTRY__L(KVM_CAP_PPC_MMU_HASH_V3),
        CAP_ENTRY__L(KVM_CAP_IMMEDIATE_EXIT),
        CAP_ENTRY__L(KVM_CAP_MIPS_VZ),
        CAP_ENTRY__L(KVM_CAP_MIPS_TE),
        CAP_ENTRY__L(KVM_CAP_MIPS_64BIT),
        CAP_ENTRY__L(KVM_CAP_S390_GS),                      /* 140 */
        CAP_ENTRY__L(KVM_CAP_S390_AIS),
        CAP_ENTRY__L(KVM_CAP_SPAPR_TCE_VFIO),
        CAP_ENTRY__L(KVM_CAP_X86_DISABLE_EXITS),
        CAP_ENTRY__L(KVM_CAP_ARM_USER_IRQ),
        CAP_ENTRY__L(KVM_CAP_S390_CMMA_MIGRATION),
        CAP_ENTRY__L(KVM_CAP_PPC_FWNMI),
        CAP_ENTRY__L(KVM_CAP_PPC_SMT_POSSIBLE),
        CAP_ENTRY__L(KVM_CAP_HYPERV_SYNIC2),
        CAP_ENTRY__L(KVM_CAP_HYPERV_VP_INDEX),
        CAP_ENTRY__L(KVM_CAP_S390_AIS_MIGRATION),           /* 150 */
        CAP_ENTRY__L(KVM_CAP_PPC_GET_CPU_CHAR),
        CAP_ENTRY__L(KVM_CAP_S390_BPB),
        CAP_ENTRY__L(KVM_CAP_GET_MSR_FEATURES),
        CAP_ENTRY__L(KVM_CAP_HYPERV_EVENTFD),
        CAP_ENTRY__L(KVM_CAP_HYPERV_TLBFLUSH),
        CAP_ENTRY__L(KVM_CAP_S390_HPAGE_1M),
        CAP_ENTRY__L(KVM_CAP_NESTED_STATE),
        CAP_ENTRY__L(KVM_CAP_ARM_INJECT_SERROR_ESR),
        CAP_ENTRY__L(KVM_CAP_MSR_PLATFORM_INFO),
        CAP_ENTRY__L(KVM_CAP_PPC_NESTED_HV),                /* 160 */
        CAP_ENTRY__L(KVM_CAP_HYPERV_SEND_IPI),
        CAP_ENTRY__L(KVM_CAP_COALESCED_PIO),
        CAP_ENTRY__L(KVM_CAP_HYPERV_ENLIGHTENED_VMCS),
        CAP_ENTRY__L(KVM_CAP_EXCEPTION_PAYLOAD),
#ifdef VBOX_VMM_TARGET_ARMV8
        CAP_ENTRY_MS(KVM_CAP_ARM_VM_IPA_SIZE, cIpaBits),
#else
        CAP_ENTRY__L(KVM_CAP_ARM_VM_IPA_SIZE),
#endif
        CAP_ENTRY__L(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT),
        CAP_ENTRY__L(KVM_CAP_HYPERV_CPUID),
        CAP_ENTRY__L(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2),
        CAP_ENTRY__L(KVM_CAP_PPC_IRQ_XIVE),
        CAP_ENTRY__L(KVM_CAP_ARM_SVE),                      /* 170 */
        CAP_ENTRY__L(KVM_CAP_ARM_PTRAUTH_ADDRESS),
        CAP_ENTRY__L(KVM_CAP_ARM_PTRAUTH_GENERIC),
        CAP_ENTRY__L(KVM_CAP_PMU_EVENT_FILTER),
        CAP_ENTRY__L(KVM_CAP_ARM_IRQ_LINE_LAYOUT_2),
        CAP_ENTRY__L(KVM_CAP_HYPERV_DIRECT_TLBFLUSH),
        CAP_ENTRY__L(KVM_CAP_PPC_GUEST_DEBUG_SSTEP),
        CAP_ENTRY__L(KVM_CAP_ARM_NISV_TO_USER),
        CAP_ENTRY__L(KVM_CAP_ARM_INJECT_EXT_DABT),
        CAP_ENTRY__L(KVM_CAP_S390_VCPU_RESETS),
        CAP_ENTRY__L(KVM_CAP_S390_PROTECTED),               /* 180 */
        CAP_ENTRY__L(KVM_CAP_PPC_SECURE_GUEST),
        CAP_ENTRY__L(KVM_CAP_HALT_POLL),
        CAP_ENTRY__L(KVM_CAP_ASYNC_PF_INT),
        CAP_ENTRY__L(KVM_CAP_LAST_CPU),
        CAP_ENTRY__L(KVM_CAP_SMALLER_MAXPHYADDR),
        CAP_ENTRY__L(KVM_CAP_S390_DIAG318),
        CAP_ENTRY__L(KVM_CAP_STEAL_TIME),
#ifdef VBOX_VMM_TARGET_ARMV8
        CAP_ENTRY__L(KVM_CAP_X86_USER_SPACE_MSR),           /* (since 5.10) */
        CAP_ENTRY__L(KVM_CAP_X86_MSR_FILTER),
#else
        CAP_ENTRY_ML(KVM_CAP_X86_USER_SPACE_MSR),           /* (since 5.10) */
        CAP_ENTRY_ML(KVM_CAP_X86_MSR_FILTER),
#endif
        CAP_ENTRY__L(KVM_CAP_ENFORCE_PV_FEATURE_CPUID),     /* 190 */
        CAP_ENTRY__L(KVM_CAP_SYS_HYPERV_CPUID),
        CAP_ENTRY__L(KVM_CAP_DIRTY_LOG_RING),
        CAP_ENTRY__L(KVM_CAP_X86_BUS_LOCK_EXIT),
        CAP_ENTRY__L(KVM_CAP_PPC_DAWR1),
        CAP_ENTRY__L(KVM_CAP_SET_GUEST_DEBUG2),
        CAP_ENTRY__L(KVM_CAP_SGX_ATTRIBUTE),
        CAP_ENTRY__L(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM),
        CAP_ENTRY__L(KVM_CAP_PTP_KVM),
        CAP_ENTRY__U(199),
        CAP_ENTRY__U(200),
        CAP_ENTRY__U(201),
        CAP_ENTRY__U(202),
        CAP_ENTRY__U(203),
        CAP_ENTRY__U(204),
        CAP_ENTRY__U(205),
        CAP_ENTRY__U(206),
        CAP_ENTRY__U(207),
        CAP_ENTRY__U(208),
        CAP_ENTRY__U(209),
        CAP_ENTRY__U(210),
        CAP_ENTRY__U(211),
        CAP_ENTRY__U(212),
        CAP_ENTRY__U(213),
        CAP_ENTRY__U(214),
        CAP_ENTRY__U(215),
        CAP_ENTRY__U(216),
    };

    LogRel(("NEM: KVM capabilities (system):\n"));
    int rcRet = VINF_SUCCESS;
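    /* KVM_CHECK_EXTENSION is a system ioctl that returns 0 when the queried
       capability is absent and a positive value (1 or a limit such as the max
       memslot/VCPU count) when it is present; negative values indicate an
       ioctl failure.  The loop below logs each value and stashes the ones we
       care about in the NEM state. */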
    for (unsigned i = 0; i < RT_ELEMENTS(s_aCaps); i++)
    {
        int rc = ioctl(pVM->nem.s.fdKvm, KVM_CHECK_EXTENSION, s_aCaps[i].iCap);
        if (rc >= 10)
            LogRel(("NEM: %36s: %#x (%d)\n", s_aCaps[i].pszName, rc, rc));
        else if (rc >= 0)
            LogRel(("NEM: %36s: %d\n", s_aCaps[i].pszName, rc));
        else
            LogRel(("NEM: %s failed: %d/%d\n", s_aCaps[i].pszName, rc, errno));
        switch (s_aCaps[i].cbNem)
        {
            case 0:
                break;
            case 1:
            {
                uint8_t *puValue = (uint8_t *)&pVM->nem.padding[s_aCaps[i].offNem];
                AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
                *puValue = (uint8_t)rc;
                AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
                break;
            }
            case 2:
            {
                uint16_t *puValue = (uint16_t *)&pVM->nem.padding[s_aCaps[i].offNem];
                AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
                *puValue = (uint16_t)rc;
                AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
                break;
            }
            case 4:
            {
                uint32_t *puValue = (uint32_t *)&pVM->nem.padding[s_aCaps[i].offNem];
                AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
                *puValue = (uint32_t)rc;
                AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
                break;
            }
            default:
                rcRet = RTErrInfoSetF(pErrInfo, VERR_NEM_IPE_0, "s_aCaps[%u] is bad: cbNem=%#x - %s",
                                      i, s_aCaps[i].cbNem, s_aCaps[i].pszName);
                AssertFailedReturn(rcRet);
        }

        /*
         * Is a required non-zero entry zero or failing?
         */
        if (s_aCaps[i].fReqNonZero && rc <= 0)
            rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_MISSING_FEATURE,
                                            "Required capability '%s' is missing!", s_aCaps[i].pszName);
    }

    /*
     * Get per VCpu KVM_RUN MMAP area size.
     */
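    /* KVM_GET_VCPU_MMAP_SIZE returns the number of bytes that must be mmap'ed
       on each VCPU file descriptor; that mapping holds the shared kvm_run
       structure used to exchange exit information with KVM_RUN. */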
    int rc = ioctl(pVM->nem.s.fdKvm, KVM_GET_VCPU_MMAP_SIZE, 0UL);
    if ((unsigned)rc < _64M)
    {
        pVM->nem.s.cbVCpuMmap = (uint32_t)rc;
        LogRel(("NEM: %36s: %#x (%d)\n", "KVM_GET_VCPU_MMAP_SIZE", rc, rc));
    }
    else if (rc < 0)
        rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_MISSING_FEATURE, "KVM_GET_VCPU_MMAP_SIZE failed: %d", errno);
    else
        rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_INIT_FAILED, "Odd KVM_GET_VCPU_MMAP_SIZE value: %#x (%d)", rc, rc);

    /*
     * Init the slot ID bitmap.
     */
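    /* Slot IDs index KVM's user memory regions; mark everything at or above
       the limit KVM reported via KVM_CAP_NR_MEMSLOTS as taken so the
       allocator never hands out an ID the kernel would reject. */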
    ASMBitSet(&pVM->nem.s.bmSlotIds[0], 0);         /* don't use slot 0 */
    if (pVM->nem.s.cMaxMemSlots < _32K)
        ASMBitSetRange(&pVM->nem.s.bmSlotIds[0], pVM->nem.s.cMaxMemSlots, _32K);
    ASMBitSet(&pVM->nem.s.bmSlotIds[0], _32K - 1);  /* don't use the last slot */

    return rcRet;
}


/** @callback_method_impl{FNVMMEMTRENDEZVOUS} */
static DECLCALLBACK(VBOXSTRICTRC) nemR3LnxFixThreadPoke(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    RT_NOREF(pVM, pvUser);
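    /* Enable the poke signal on this EMT so RTThreadPoke can interrupt a
       blocking KVM_RUN ioctl (which then returns with EINTR). */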
    int rc = RTThreadControlPokeSignal(pVCpu->hThread, true /*fEnable*/);
    AssertLogRelRC(rc);
    return VINF_SUCCESS;
}


/**
 * Tries to initialize the native API.
 *
 * This may only do part of the job; more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   fFallback   Whether we're in fallback mode or use-NEM mode. In
 *                      the latter we'll fail if we cannot initialize.
 * @param   fForced     Whether the HMForced flag is set and we should
 *                      fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    RT_NOREF(pVM, fFallback, fForced);
    /*
     * Some state init.
     */
    pVM->nem.s.fdKvm = -1;
    pVM->nem.s.fdVm  = -1;
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
        pNemCpu->fdVCpu = -1;
    }

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);

    /*
     * Open kvm subsystem so we can issue system ioctls.
     */
    int rc;
    int fdKvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
    if (fdKvm >= 0)
    {
        pVM->nem.s.fdKvm = fdKvm;

        /*
         * Check capabilities.
         */
        rc = nemR3LnxInitCheckCapabilities(pVM, pErrInfo);
        if (RT_SUCCESS(rc))
        {
            /*
             * Create an empty VM since it is recommended we check capabilities on
             * the VM rather than the system descriptor.
             */
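            /* On arm64 the KVM_CREATE_VM 'type' argument encodes the requested
               guest physical address (IPA) width in its low byte (see
               KVM_VM_TYPE_ARM_IPA_SIZE); on x86 the kernel requires it to be zero. */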
#ifdef VBOX_VMM_TARGET_ARMV8
            int fdVm = ioctl(fdKvm, KVM_CREATE_VM, pVM->nem.s.cIpaBits);
#else
            int fdVm = ioctl(fdKvm, KVM_CREATE_VM, 0UL /* Type must be zero on x86 */);
#endif
            if (fdVm >= 0)
            {
                pVM->nem.s.fdVm = fdVm;

                /*
                 * Set up the VM (more on this later).
                 */
                rc = nemR3LnxInitSetupVm(pVM, pErrInfo);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Set ourselves as the execution engine and make config adjustments.
                     */
                    VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
                    Log(("NEM: Marked active!\n"));
                    PGMR3EnableNemMode(pVM);

                    /*
                     * Register release statistics
                     */
                    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
                    {
                        PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportPendingInterrupt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times an interrupt was pending when importing from KVM", "/NEM/CPU%u/ImportPendingInterrupt", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExportPendingInterrupt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times an interrupt was pending when exporting to KVM", "/NEM/CPU%u/ExportPendingInterrupt", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn1Loop, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-01-loop", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn2Loops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-02-loops", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn3Loops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-03-loops", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn4PlusLoops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-04-to-7-loops", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "All exits", "/NEM/CPU%u/Exit", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_IO", "/NEM/CPU%u/Exit/Io", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMmio, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_MMIO", "/NEM/CPU%u/Exit/Mmio", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitSetTpr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_SET_TPR", "/NEM/CPU%u/Exit/SetTpr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitTprAccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_TPR_ACCESS", "/NEM/CPU%u/Exit/TprAccess", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitRdMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_RDMSR", "/NEM/CPU%u/Exit/RdMsr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitWrMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_WRMSR", "/NEM/CPU%u/Exit/WrMsr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitIrqWindowOpen, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_IRQ_WINDOW_OPEN", "/NEM/CPU%u/Exit/IrqWindowOpen", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_HLT", "/NEM/CPU%u/Exit/Hlt", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitIntr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTR", "/NEM/CPU%u/Exit/Intr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitHypercall, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_HYPERCALL", "/NEM/CPU%u/Exit/Hypercall", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitDebug, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_DEBUG", "/NEM/CPU%u/Exit/Debug", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitBusLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_BUS_LOCK", "/NEM/CPU%u/Exit/BusLock", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitInternalErrorEmulation, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTERNAL_ERROR/EMULATION", "/NEM/CPU%u/Exit/InternalErrorEmulation", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitInternalErrorFatal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTERNAL_ERROR/*", "/NEM/CPU%u/Exit/InternalErrorFatal", idCpu);
                    }

                    /*
                     * Success.
                     */
                    return VINF_SUCCESS;
                }
                close(fdVm);
                pVM->nem.s.fdVm = -1;

                /*
                 * Bail out.
                 */
            }
            else
                rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_CREATE_VM failed: %u", errno);
        }
        close(fdKvm);
        pVM->nem.s.fdKvm = -1;
    }
    else if (errno == EACCES)
        rc = RTErrInfoSet(pErrInfo, VERR_ACCESS_DENIED, "Do not have access to open /dev/kvm for reading & writing.");
    else if (errno == ENOENT)
        rc = RTErrInfoSet(pErrInfo, VERR_NOT_SUPPORTED, "KVM is not available (/dev/kvm does not exist)");
    else
        rc = RTErrInfoSetF(pErrInfo, RTErrConvertFromErrno(errno), "Failed to open '/dev/kvm': %u", errno);

    /*
     * We only fail if in forced mode or when not in fallback mode; otherwise
     * we just log the complaint and return.
     */
    Assert(RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
    LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}


/**
 * This is called after CPUMR3Init is done.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
int nemR3NativeInitAfterCPUM(PVM pVM)
{
    /*
     * Validate sanity.
     */
    AssertReturn(pVM->nem.s.fdKvm >= 0, VERR_WRONG_ORDER);
    AssertReturn(pVM->nem.s.fdVm >= 0, VERR_WRONG_ORDER);
    AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);

    /** @todo */

    return VINF_SUCCESS;
}


int nemR3NativeTerm(PVM pVM)
{
    /*
     * Per-cpu data
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];

        if (pVCpu->nem.s.fdVCpu != -1)
        {
            close(pVCpu->nem.s.fdVCpu);
            pVCpu->nem.s.fdVCpu = -1;
        }
        if (pVCpu->nem.s.pRun)
        {
            munmap(pVCpu->nem.s.pRun, pVM->nem.s.cbVCpuMmap);
            pVCpu->nem.s.pRun = NULL;
        }
    }

    /*
     * Global data.
     */
    if (pVM->nem.s.fdVm != -1)
    {
        close(pVM->nem.s.fdVm);
        pVM->nem.s.fdVm = -1;
    }

    if (pVM->nem.s.fdKvm != -1)
    {
        close(pVM->nem.s.fdKvm);
        pVM->nem.s.fdKvm = -1;
    }
    return VINF_SUCCESS;
}


/**
 * VM reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
void nemR3NativeReset(PVM pVM)
{
    RT_NOREF(pVM);
}


/**
 * Reset CPU due to INIT IPI or hot (un)plugging.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the CPU being
 *                      reset.
 * @param   fInitIpi    Whether this is the INIT IPI or hot (un)plugging case.
 */
void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
{
    RT_NOREF(pVCpu, fInitIpi);
}


/*********************************************************************************************************************************
*   Memory management                                                                                                            *
*********************************************************************************************************************************/


/**
 * Allocates a memory slot ID.
 *
 * @returns Slot ID on success, UINT16_MAX on failure.
 */
static uint16_t nemR3LnxMemSlotIdAlloc(PVM pVM)
{
    /* Use the hint first. */
    uint16_t idHint = pVM->nem.s.idPrevSlot;
    if (idHint < _32K - 1)
    {
        int32_t idx = ASMBitNextClear(&pVM->nem.s.bmSlotIds, _32K, idHint);
        Assert(idx < _32K);
        if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx))
            return pVM->nem.s.idPrevSlot = (uint16_t)idx;
    }

    /*
     * Search the whole map from the start.
     */
    int32_t idx = ASMBitFirstClear(&pVM->nem.s.bmSlotIds, _32K);
    Assert(idx < _32K);
    if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx))
        return pVM->nem.s.idPrevSlot = (uint16_t)idx;

    Assert(idx < 0 /*shouldn't trigger unless there is a race */);
    return UINT16_MAX; /* caller is expected to assert. */
}


/**
 * Frees a memory slot ID
 */
static void nemR3LnxMemSlotIdFree(PVM pVM, uint16_t idSlot)
{
    if (RT_LIKELY(idSlot < _32K && ASMAtomicBitTestAndClear(&pVM->nem.s.bmSlotIds, idSlot)))
    { /*likely*/ }
    else
        AssertMsgFailed(("idSlot=%u (%#x)\n", idSlot, idSlot));
}



VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
                                               uint8_t *pu2State, uint32_t *puNemRange)
{
    uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
    AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);

    Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d) - idSlot=%#x\n",
          GCPhys, cb, pvR3, pu2State, *pu2State, puNemRange, *puNemRange, idSlot));

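    /* KVM_SET_USER_MEMORY_REGION creates (or updates) a guest-physical memory
       slot: the host virtual range starting at userspace_addr is mapped at
       guest_phys_addr for memory_size bytes.  Passing memory_size = 0 for the
       same slot later deletes it again. */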
    struct kvm_userspace_memory_region Region;
    Region.slot             = idSlot;
    Region.flags            = 0;
    Region.guest_phys_addr  = GCPhys;
    Region.memory_size      = cb;
    Region.userspace_addr   = (uintptr_t)pvR3;

    int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
    if (rc == 0)
    {
        *pu2State   = 0;
        *puNemRange = idSlot;
        return VINF_SUCCESS;
    }

    LogRel(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p, idSlot=%#x failed: %u/%u\n", GCPhys, cb, pvR3, idSlot, rc, errno));
    nemR3LnxMemSlotIdFree(pVM, idSlot);
    return VERR_NEM_MAP_PAGES_FAILED;
}


VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
{
    RT_NOREF(pVM);
    return true;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(pvRam);

    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        /** @todo implement splitting and whatnot of ranges if we want to be 100%
         *        conforming (just modify RAM registrations in MM.cpp to test). */
        AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2),
                                    VERR_NEM_MAP_PAGES_FAILED);
    }

    /*
     * Register MMIO2.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        AssertReturn(pvMmio2, VERR_NEM_MAP_PAGES_FAILED);
        AssertReturn(puNemRange, VERR_NEM_MAP_PAGES_FAILED);

        uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
        AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);

        struct kvm_userspace_memory_region Region;
        Region.slot             = idSlot;
        Region.flags            = fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES ? KVM_MEM_LOG_DIRTY_PAGES : 0;
        Region.guest_phys_addr  = GCPhys;
        Region.memory_size      = cb;
        Region.userspace_addr   = (uintptr_t)pvMmio2;

        int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
        if (rc == 0)
        {
            *pu2State   = 0;
            *puNemRange = idSlot;
            Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvMmio2=%p - idSlot=%#x\n",
                  GCPhys, cb, fFlags, pvMmio2, idSlot));
            return VINF_SUCCESS;
        }

        nemR3LnxMemSlotIdFree(pVM, idSlot);
        AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n",
                                     GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc),
                                    VERR_NEM_MAP_PAGES_FAILED);
    }

    /* MMIO, don't care. */
    *pu2State   = 0;
    *puNemRange = UINT32_MAX;
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);

    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        /** @todo implement splitting and whatnot of ranges if we want to be 100%
         *        conforming (just modify RAM registrations in MM.cpp to test). */
        AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2),
                                    VERR_NEM_UNMAP_PAGES_FAILED);
    }

    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        uint32_t const idSlot = *puNemRange;
        AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4);
        AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4);

        struct kvm_userspace_memory_region Region;
        Region.slot             = idSlot;
        Region.flags            = 0;
        Region.guest_phys_addr  = GCPhys;
        Region.memory_size      = 0;    /* this deregisters it. */
        Region.userspace_addr   = (uintptr_t)pvMmio2;

        int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
        if (rc == 0)
        {
            if (pu2State)
                *pu2State = 0;
            *puNemRange = UINT32_MAX;
            nemR3LnxMemSlotIdFree(pVM, idSlot);
            return VINF_SUCCESS;
        }

        AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n",
                                     GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc),
                                    VERR_NEM_UNMAP_PAGES_FAILED);
    }

    if (pu2State)
        *pu2State = UINT8_MAX;
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    AssertReturn(uNemRange > 0 && uNemRange < _32K, VERR_NEM_IPE_4);
    AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, uNemRange), VERR_NEM_IPE_4);

    RT_NOREF(GCPhys, cbBitmap);

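    /* KVM_GET_DIRTY_LOG copies the slot's dirty-page bitmap (one bit per guest
       page) into dirty_bitmap and clears it in the kernel; this requires the
       slot to have been registered with KVM_MEM_LOG_DIRTY_PAGES. */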
    struct kvm_dirty_log DirtyLog;
    DirtyLog.slot         = uNemRange;
    DirtyLog.padding1     = 0;
    DirtyLog.dirty_bitmap = pvBitmap;

    int rc = ioctl(pVM->nem.s.fdVm, KVM_GET_DIRTY_LOG, &DirtyLog);
    AssertLogRelMsgReturn(rc == 0, ("%RGp LB %RGp idSlot=%#x failed: %u/%u\n", GCPhys, cb, uNemRange, errno, rc),
                          VERR_NEM_QUERY_DIRTY_BITMAP_FAILED);

    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;

    /* We don't support putting ROM where there is already RAM.  For
       now just shuffle the registrations till it works... */
    AssertLogRelMsgReturn(!(fFlags & NEM_NOTIFY_PHYS_ROM_F_REPLACE), ("%RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags),
                          VERR_NEM_MAP_PAGES_FAILED);

    /** @todo figure out how to do shadow ROMs. */

    /*
     * We only allocate a slot number here in case we need to use it to
     * fend off physical handler fun.
     */
    uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
    AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);

    *pu2State   = 0;
    *puNemRange = idSlot;
    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n",
          GCPhys, cb, fFlags, pvPages, idSlot));
    RT_NOREF(GCPhys, cb, fFlags, pvPages);
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
                                                   uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));

    AssertPtrReturn(pvPages, VERR_NEM_IPE_5);

    uint32_t const idSlot = *puNemRange;
    AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4);
    AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4);

    *pu2State = UINT8_MAX;

    /*
     * Do the actual setting of the user pages here now that we've got a valid
     * pvPages (it typically isn't available during the early notification,
     * unless we're replacing RAM).
     */
    /** @todo r=bird: if it's overlapping RAM, we shouldn't need an additional
     *        registration, should we? */
    struct kvm_userspace_memory_region Region;
    Region.slot             = idSlot;
    Region.flags            = 0;
    Region.guest_phys_addr  = GCPhys;
    Region.memory_size      = cb;
    Region.userspace_addr   = (uintptr_t)pvPages;

    int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
    if (rc == 0)
    {
        *pu2State = 0;
        Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n",
              GCPhys, cb, fFlags, pvPages, idSlot));
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvPages=%p, idSlot=%#x failed: %u/%u\n",
                                 GCPhys, cb, fFlags, pvPages, idSlot, errno, rc),
                                VERR_NEM_MAP_PAGES_FAILED);
}


VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
    Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
    RT_NOREF(pVCpu, fEnabled);
}


VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
    RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
}


void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    RT_NOREF(pVM, enmKind, GCPhys, cb);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    RT_NOREF(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fRestoreAsRAM);
}


int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
    return VINF_SUCCESS;
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
                                                  PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    Assert(VM_IS_NEM_ENABLED(pVM));
    RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                              RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
    Assert(VM_IS_NEM_ENABLED(pVM));
    RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
}