VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h @ 56065

Last change on this file since 56065 was 55863, checked in by vboxsync, 10 years ago

IPRT,SUPDrv,VMM: Revised the context switching hook interface. Do less work when enabling the hook (formerly 'registration'). Drop the reference counting (kept internally for solaris) as it complicates restrictions wrt destroying enabled hooks. Bumped support driver version.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 41.2 KB
1/* $Id: HMInternal.h 55863 2015-05-14 18:29:34Z vboxsync $ */
2/** @file
3 * HM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___HMInternal_h
19#define ___HMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/vmm/em.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/dis.h>
26#include <VBox/vmm/hm.h>
27#include <VBox/vmm/hm_vmx.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/cpum.h>
30#include <iprt/memobj.h>
31#include <iprt/cpuset.h>
32#include <iprt/mp.h>
33#include <iprt/avl.h>
34#include <iprt/string.h>
35
36#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
37/* Enable 64-bit guest support. */
38# define VBOX_ENABLE_64_BITS_GUESTS
39#endif
40
41#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
42# define VMX_USE_CACHED_VMCS_ACCESSES
43#endif
44
45/** @def HM_PROFILE_EXIT_DISPATCH
46 * Enables profiling of the VM exit handler dispatching. */
47#if 0
48# define HM_PROFILE_EXIT_DISPATCH
49#endif
50
51RT_C_DECLS_BEGIN
52
53
54/** @defgroup grp_hm_int Internal
55 * @ingroup grp_hm
56 * @internal
57 * @{
58 */
59
60/** @def HMCPU_CF_CLEAR
61 * Clears a HM-context flag.
62 *
63 * @param pVCpu Pointer to the VMCPU.
64 * @param fFlag The flag to clear.
65 */
66#define HMCPU_CF_CLEAR(pVCpu, fFlag) (ASMAtomicUoAndU32(&(pVCpu)->hm.s.fContextUseFlags, ~(fFlag)))
67
68/** @def HMCPU_CF_SET
69 * Sets a HM-context flag.
70 *
71 * @param pVCpu Pointer to the VMCPU.
72 * @param fFlag The flag to set.
73 */
74#define HMCPU_CF_SET(pVCpu, fFlag) (ASMAtomicUoOrU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlag)))
75
76/** @def HMCPU_CF_IS_SET
77 * Checks if all the flags in the specified HM-context set are pending.
78 *
79 * @param pVCpu Pointer to the VMCPU.
80 * @param fFlag The flag to check.
81 */
82#define HMCPU_CF_IS_SET(pVCpu, fFlag) ((ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlag)) == (fFlag))
83
84/** @def HMCPU_CF_IS_PENDING
85 * Checks if one or more of the flags in the specified HM-context set are
86 * pending.
87 *
88 * @param pVCpu Pointer to the VMCPU.
89 * @param fFlags The flags to check for.
90 */
91#define HMCPU_CF_IS_PENDING(pVCpu, fFlags) RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlags))
92
93/** @def HMCPU_CF_IS_PENDING_ONLY
94 * Checks if -only- one or more of the specified HM-context flags are pending.
95 *
96 * @param pVCpu Pointer to the VMCPU.
97 * @param fFlags The flags to check for.
98 */
99#define HMCPU_CF_IS_PENDING_ONLY(pVCpu, fFlags) !RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & ~(fFlags))
100
101/** @def HMCPU_CF_IS_SET_ONLY
102 * Checks if -only- all the flags in the specified HM-context set are pending.
103 *
104 * @param pVCpu Pointer to the VMCPU.
105 * @param fFlags The flags to check for.
106 */
107#define HMCPU_CF_IS_SET_ONLY(pVCpu, fFlags) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) == (fFlags))
108
109/** @def HMCPU_CF_RESET_TO
110 * Resets the HM-context flags to the specified value.
111 *
112 * @param pVCpu Pointer to the VMCPU.
113 * @param fFlags The new value.
114 */
115#define HMCPU_CF_RESET_TO(pVCpu, fFlags) (ASMAtomicUoWriteU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlags)))
116
117/** @def HMCPU_CF_VALUE
118 * Returns the current HM-context flags value.
119 *
120 * @param pVCpu Pointer to the VMCPU.
121 */
122#define HMCPU_CF_VALUE(pVCpu) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags))
123
124
125/** Resets/initializes the VM-exit/#VMEXIT history array. */
126#define HMCPU_EXIT_HISTORY_RESET(pVCpu) (memset(&(pVCpu)->hm.s.auExitHistory, 0xff, sizeof((pVCpu)->hm.s.auExitHistory)))
127
128/** Updates the VM-exit/#VMEXIT history array. */
129#define HMCPU_EXIT_HISTORY_ADD(pVCpu, a_ExitReason) \
130 do { \
131 AssertMsg((pVCpu)->hm.s.idxExitHistoryFree < RT_ELEMENTS((pVCpu)->hm.s.auExitHistory), ("%u\n", (pVCpu)->hm.s.idxExitHistoryFree)); \
132 (pVCpu)->hm.s.auExitHistory[(pVCpu)->hm.s.idxExitHistoryFree++] = (uint16_t)(a_ExitReason); \
133 if ((pVCpu)->hm.s.idxExitHistoryFree == RT_ELEMENTS((pVCpu)->hm.s.auExitHistory)) \
134 (pVCpu)->hm.s.idxExitHistoryFree = 0; \
135 (pVCpu)->hm.s.auExitHistory[(pVCpu)->hm.s.idxExitHistoryFree] = UINT16_MAX; \
136 } while (0)
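
/*
 * Illustrative usage sketch, not part of the original header: how the exit-history
 * macros above are typically driven.  pVCpu and uExitReason are assumed to come from
 * the surrounding ring-0 exit-handling code.
 *
 * @code
 *    HMCPU_EXIT_HISTORY_RESET(pVCpu);                // once, when the VCPU is initialized
 *    ...
 *    HMCPU_EXIT_HISTORY_ADD(pVCpu, uExitReason);     // in the common VM-exit path
 *    ...
 *    // Dumping: slots holding UINT16_MAX are unused (or the next-free sentinel).
 *    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->hm.s.auExitHistory); i++)
 *        if (pVCpu->hm.s.auExitHistory[i] != UINT16_MAX)
 *            LogRel(("exit[%u]=%#x\n", i, pVCpu->hm.s.auExitHistory[i]));
 * @endcode
 */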
137
138/** Maximum number of exit reason statistics counters. */
139#define MAX_EXITREASON_STAT 0x100
140#define MASK_EXITREASON_STAT 0xff
141#define MASK_INJECT_IRQ_STAT 0xff
142
143/** @name HM changed flags.
144 * These flags are used to keep track of which important registers have been
145 * changed since they were last reset.
146 * @{
147 */
148#define HM_CHANGED_GUEST_CR0 RT_BIT(0) /* Shared */
149#define HM_CHANGED_GUEST_CR3 RT_BIT(1)
150#define HM_CHANGED_GUEST_CR4 RT_BIT(2)
151#define HM_CHANGED_GUEST_GDTR RT_BIT(3)
152#define HM_CHANGED_GUEST_IDTR RT_BIT(4)
153#define HM_CHANGED_GUEST_LDTR RT_BIT(5)
154#define HM_CHANGED_GUEST_TR RT_BIT(6)
155#define HM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(7)
156#define HM_CHANGED_GUEST_DEBUG RT_BIT(8) /* Shared */
157#define HM_CHANGED_GUEST_RIP RT_BIT(9)
158#define HM_CHANGED_GUEST_RSP RT_BIT(10)
159#define HM_CHANGED_GUEST_RFLAGS RT_BIT(11)
160#define HM_CHANGED_GUEST_CR2 RT_BIT(12)
161#define HM_CHANGED_GUEST_SYSENTER_CS_MSR RT_BIT(13)
162#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR RT_BIT(14)
163#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR RT_BIT(15)
164#define HM_CHANGED_GUEST_EFER_MSR RT_BIT(16)
165#define HM_CHANGED_GUEST_LAZY_MSRS RT_BIT(17) /* Shared */
166#define HM_CHANGED_GUEST_XCPT_INTERCEPTS RT_BIT(18)
167/* VT-x specific state. */
168#define HM_CHANGED_VMX_GUEST_AUTO_MSRS RT_BIT(19)
169#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE RT_BIT(20)
170#define HM_CHANGED_VMX_GUEST_APIC_STATE RT_BIT(21)
171#define HM_CHANGED_VMX_ENTRY_CTLS RT_BIT(22)
172#define HM_CHANGED_VMX_EXIT_CTLS RT_BIT(23)
173/* AMD-V specific state. */
174#define HM_CHANGED_SVM_GUEST_APIC_STATE RT_BIT(19)
175#define HM_CHANGED_SVM_RESERVED1 RT_BIT(20)
176#define HM_CHANGED_SVM_RESERVED2 RT_BIT(21)
177#define HM_CHANGED_SVM_RESERVED3 RT_BIT(22)
178#define HM_CHANGED_SVM_RESERVED4 RT_BIT(23)
179
180#define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_CR0 \
181 | HM_CHANGED_GUEST_CR3 \
182 | HM_CHANGED_GUEST_CR4 \
183 | HM_CHANGED_GUEST_GDTR \
184 | HM_CHANGED_GUEST_IDTR \
185 | HM_CHANGED_GUEST_LDTR \
186 | HM_CHANGED_GUEST_TR \
187 | HM_CHANGED_GUEST_SEGMENT_REGS \
188 | HM_CHANGED_GUEST_DEBUG \
189 | HM_CHANGED_GUEST_RIP \
190 | HM_CHANGED_GUEST_RSP \
191 | HM_CHANGED_GUEST_RFLAGS \
192 | HM_CHANGED_GUEST_CR2 \
193 | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
194 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
195 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
196 | HM_CHANGED_GUEST_EFER_MSR \
197 | HM_CHANGED_GUEST_LAZY_MSRS \
198 | HM_CHANGED_GUEST_XCPT_INTERCEPTS \
199 | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
200 | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
201 | HM_CHANGED_VMX_GUEST_APIC_STATE \
202 | HM_CHANGED_VMX_ENTRY_CTLS \
203 | HM_CHANGED_VMX_EXIT_CTLS)
204
205#define HM_CHANGED_HOST_CONTEXT RT_BIT(24)
206
207/* Bits shared between host and guest. */
208#define HM_CHANGED_HOST_GUEST_SHARED_STATE ( HM_CHANGED_GUEST_CR0 \
209 | HM_CHANGED_GUEST_DEBUG \
210 | HM_CHANGED_GUEST_LAZY_MSRS)
211/** @} */
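
/*
 * Illustrative usage sketch, not part of the original header: the HMCPU_CF_* macros
 * defined earlier operate on these HM_CHANGED_* bits.  A VM-exit handler marks what it
 * dirtied; the state-export path before the next VM-entry queries and clears the
 * pending bits.  pVCpu is assumed to be the current VMCPU.
 *
 * @code
 *    // After emulating/skipping an instruction, RIP (and RFLAGS) must be re-exported:
 *    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
 *
 *    // Before VM-entry, export only what is pending and clear the bits as we go:
 *    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
 *    {
 *        // ... write the guest RIP into the VMCS/VMCB ...
 *        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
 *    }
 *    Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST));  // everything exported
 * @endcode
 */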
212
213/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
214#define HM_MAX_TLB_SHOOTDOWN_PAGES 8
215
216/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
217#define HM_EPT_IDENTITY_PG_TABLE_SIZE PAGE_SIZE
218/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
219#define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
220/** Total guest mapped memory needed. */
221#define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)
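
/*
 * For reference (editorial arithmetic, not part of the original header): the identity
 * table is a 32-bit page directory using 4 MB pages, i.e. 1024 entries * 4 bytes
 * = 4096 bytes = PAGE_SIZE, and 1024 * 4 MB covers the full 4 GB address space.
 * HM_VTX_TSS_SIZE adds two full pages of I/O permission bitmap plus the mandatory
 * 0xff terminator byte that must follow it.
 */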
222
223
224/** @name Macros for enabling and disabling preemption.
225 * These are really just for hiding the RTTHREADPREEMPTSTATE and asserting that
226 * preemption has already been disabled when there is no context hook.
227 * @{ */
228#ifdef VBOX_STRICT
229# define HM_DISABLE_PREEMPT() \
230 RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
231 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD) || VMMR0ThreadCtxHookIsEnabled(pVCpu)); \
232 RTThreadPreemptDisable(&PreemptStateInternal)
233#else
234# define HM_DISABLE_PREEMPT() \
235 RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
236 RTThreadPreemptDisable(&PreemptStateInternal)
237#endif /* VBOX_STRICT */
238#define HM_RESTORE_PREEMPT() do { RTThreadPreemptRestore(&PreemptStateInternal); } while(0)
239/** @} */
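
/*
 * Illustrative usage sketch, not part of the original header: the pair must be used
 * within one scope, since HM_DISABLE_PREEMPT() declares the local preemption-state
 * variable, and in strict builds it asserts on pVCpu, so a PVMCPU named pVCpu must be
 * in scope at the call site.
 *
 * @code
 *    HM_DISABLE_PREEMPT();
 *    // ... work that must not migrate to another host CPU ...
 *    HM_RESTORE_PREEMPT();
 * @endcode
 */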
240
241
242/** Enable for TPR guest patching. */
243#define VBOX_HM_WITH_GUEST_PATCHING
244
245/** @name HM saved state versions
246 * @{
247 */
248#ifdef VBOX_HM_WITH_GUEST_PATCHING
249# define HM_SAVED_STATE_VERSION 5
250# define HM_SAVED_STATE_VERSION_NO_PATCHING 4
251#else
252# define HM_SAVED_STATE_VERSION 4
253# define HM_SAVED_STATE_VERSION_NO_PATCHING 4
254#endif
255#define HM_SAVED_STATE_VERSION_2_0_X 3
256/** @} */
257
258/**
259 * Global per-cpu information. (host)
260 */
261typedef struct HMGLOBALCPUINFO
262{
263 /** The CPU ID. */
264 RTCPUID idCpu;
265 /** The VM_HSAVE_AREA (AMD-V) / VMXON region (Intel) memory backing. */
266 RTR0MEMOBJ hMemObj;
267 /** Current ASID (AMD-V) / VPID (Intel). */
268 uint32_t uCurrentAsid;
269 /** TLB flush count. */
270 uint32_t cTlbFlushes;
271 /** Whether to flush each new ASID/VPID before use. */
272 bool fFlushAsidBeforeUse;
273 /** Configured for VT-x or AMD-V. */
274 bool fConfigured;
275 /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
276 bool fIgnoreAMDVInUseError;
277 /** In use by our code. (for power suspend) */
278 volatile bool fInUse;
279} HMGLOBALCPUINFO;
280/** Pointer to the per-cpu global information. */
281typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;
282
283typedef enum
284{
285 HMPENDINGIO_INVALID = 0,
286 HMPENDINGIO_PORT_READ,
287 HMPENDINGIO_PORT_WRITE,
288 HMPENDINGIO_STRING_READ,
289 HMPENDINGIO_STRING_WRITE,
290 /** The usual 32-bit paranoia. */
291 HMPENDINGIO_32BIT_HACK = 0x7fffffff
292} HMPENDINGIO;
293
294
295typedef enum
296{
297 HMTPRINSTR_INVALID,
298 HMTPRINSTR_READ,
299 HMTPRINSTR_READ_SHR4,
300 HMTPRINSTR_WRITE_REG,
301 HMTPRINSTR_WRITE_IMM,
302 HMTPRINSTR_JUMP_REPLACEMENT,
303 /** The usual 32-bit paranoia. */
304 HMTPRINSTR_32BIT_HACK = 0x7fffffff
305} HMTPRINSTR;
306
307typedef struct
308{
309 /** The key is the address of the patched instruction. (32 bits GC ptr) */
310 AVLOU32NODECORE Core;
311 /** Original opcode. */
312 uint8_t aOpcode[16];
313 /** Instruction size. */
314 uint32_t cbOp;
315 /** Replacement opcode. */
316 uint8_t aNewOpcode[16];
317 /** Replacement instruction size. */
318 uint32_t cbNewOp;
319 /** Instruction type. */
320 HMTPRINSTR enmType;
321 /** Source operand. */
322 uint32_t uSrcOperand;
323 /** Destination operand. */
324 uint32_t uDstOperand;
325 /** Number of times the instruction caused a fault. */
326 uint32_t cFaults;
327 /** Patch address of the jump replacement. */
328 RTGCPTR32 pJumpTarget;
329} HMTPRPATCH;
330/** Pointer to HMTPRPATCH. */
331typedef HMTPRPATCH *PHMTPRPATCH;
332
333/**
334 * Switcher function, HC to the special 64-bit RC.
335 *
336 * @param pVM Pointer to the VM.
337 * @param offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
338 * @returns Return code indicating the action to take.
339 */
340typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
341/** Pointer to switcher function. */
342typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
343
344/**
345 * HM VM Instance data.
346 * Changes to this must be checked against the padding of the hm union in VM!
347 */
348typedef struct HM
349{
350 /** Set when we've initialized VMX or SVM. */
351 bool fInitialized;
352 /** Set if nested paging is enabled. */
353 bool fNestedPaging;
354 /** Set if nested paging is allowed. */
355 bool fAllowNestedPaging;
356 /** Set if large pages are enabled (requires nested paging). */
357 bool fLargePages;
359 /** Set if we can support 64-bit guests. */
359 bool fAllow64BitGuests;
360 /** Set if an IO-APIC is configured for this VM. */
361 bool fHasIoApic;
362 /** Set when TPR patching is allowed. */
363 bool fTprPatchingAllowed;
364 /** Set when we initialize VT-x or AMD-V once for all CPUs. */
365 bool fGlobalInit;
366 /** Set when TPR patching is active. */
367 bool fTPRPatchingActive;
368 bool u8Alignment[3];
369
370 /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
371 uint32_t uHostKernelFeatures;
372
373 /** Maximum ASID allowed. */
374 uint32_t uMaxAsid;
375 /** The maximum number of resume loops allowed in ring-0 (safety precaution).
376 * This number is set much higher when RTThreadPreemptIsPending is reliable. */
377 uint32_t cMaxResumeLoops;
378
379 /** Guest allocated memory for patching purposes. */
380 RTGCPTR pGuestPatchMem;
381 /** Current free pointer inside the patch block. */
382 RTGCPTR pFreeGuestPatchMem;
383 /** Size of the guest patch memory block. */
384 uint32_t cbGuestPatchMem;
385 uint32_t u32Alignment0;
386
387#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
388 /** 32 to 64 bits switcher entrypoint. */
389 R0PTRTYPE(PFNHMSWITCHERHC) pfnHost32ToGuest64R0;
390 RTR0PTR pvR0Alignment0;
391#endif
392
393 struct
394 {
395 /** Set by the ring-0 side of HM to indicate VMX is supported by the
396 * CPU. */
397 bool fSupported;
398 /** Set when we've enabled VMX. */
399 bool fEnabled;
400 /** Set if VPID is supported. */
401 bool fVpid;
402 /** Set if VT-x VPID is allowed. */
403 bool fAllowVpid;
404 /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
405 bool fUnrestrictedGuest;
406 /** Set if unrestricted guest execution is allowed to be used. */
407 bool fAllowUnrestricted;
408 /** Whether we're using the preemption timer or not. */
409 bool fUsePreemptTimer;
410 /** The shift mask employed by the VMX-Preemption timer. */
411 uint8_t cPreemptTimerShift;
412
413 /** Virtual address of the TSS page used for real mode emulation. */
414 R3PTRTYPE(PVBOXTSS) pRealModeTSS;
415 /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
416 R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;
417
418 /** Physical address of the APIC-access page. */
419 RTHCPHYS HCPhysApicAccess;
420 /** R0 memory object for the APIC-access page. */
421 RTR0MEMOBJ hMemObjApicAccess;
422 /** Virtual address of the APIC-access page. */
423 R0PTRTYPE(uint8_t *) pbApicAccess;
424
425#ifdef VBOX_WITH_CRASHDUMP_MAGIC
426 RTHCPHYS HCPhysScratch;
427 RTR0MEMOBJ hMemObjScratch;
428 R0PTRTYPE(uint8_t *) pbScratch;
429#endif
430
431 /** Internal Id of which flush-handler to use for tagged-TLB entries. */
432 uint32_t uFlushTaggedTlb;
433 uint32_t u32Alignment0;
434 /** Host CR4 value (set by ring-0 VMX init) */
435 uint64_t u64HostCr4;
436
437 /** Host EFER value (set by ring-0 VMX init) */
438 uint64_t u64HostEfer;
439 /** Whether the CPU supports VMCS fields for swapping EFER. */
440 bool fSupportsVmcsEfer;
441 uint8_t u8Alignment2[7];
442
443 /** VMX MSR values */
444 VMXMSRS Msrs;
445
446 /** Flush types for invept & invvpid; they depend on capabilities. */
447 VMXFLUSHEPT enmFlushEpt;
448 VMXFLUSHVPID enmFlushVpid;
449
450 /** Host-physical address for a failing VMXON instruction. */
451 RTHCPHYS HCPhysVmxEnableError;
452 } vmx;
453
454 struct
455 {
456 /** Set by the ring-0 side of HM to indicate SVM is supported by the
457 * CPU. */
458 bool fSupported;
459 /** Set when we've enabled SVM. */
460 bool fEnabled;
461 /** Set if erratum 170 affects the AMD CPU. */
462 bool fAlwaysFlushTLB;
463 /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
464 bool fIgnoreInUseError;
465 uint8_t u8Alignment0[4];
466
467 /** Physical address of the IO bitmap (12 KB). */
468 RTHCPHYS HCPhysIOBitmap;
469 /** R0 memory object for the IO bitmap (12 KB). */
470 RTR0MEMOBJ hMemObjIOBitmap;
471 /** Virtual address of the IO bitmap. */
472 R0PTRTYPE(void *) pvIOBitmap;
473
474 /* HWCR MSR (for diagnostics) */
475 uint64_t u64MsrHwcr;
476
477 /** SVM revision. */
478 uint32_t u32Rev;
479 /** SVM feature bits from cpuid 0x8000000a */
480 uint32_t u32Features;
481 } svm;
482
483 /**
484 * AVL tree with all patches (active or disabled) sorted by guest instruction
485 * address.
486 */
487 AVLOU32TREE PatchTree;
488 uint32_t cPatches;
489 HMTPRPATCH aPatches[64];
490
491 struct
492 {
493 uint32_t u32AMDFeatureECX;
494 uint32_t u32AMDFeatureEDX;
495 } cpuid;
496
497 /** Saved error from detection */
498 int32_t lLastError;
499
500 /** HMR0Init was run */
501 bool fHMR0Init;
502 bool u8Alignment1[7];
503
504 STAMCOUNTER StatTprPatchSuccess;
505 STAMCOUNTER StatTprPatchFailure;
506 STAMCOUNTER StatTprReplaceSuccess;
507 STAMCOUNTER StatTprReplaceFailure;
508} HM;
509/** Pointer to HM VM instance data. */
510typedef HM *PHM;
511
512/* Maximum number of cached entries. */
513#define VMCSCACHE_MAX_ENTRY 128
514
515/**
516 * Structure for storing read and write VMCS actions.
517 */
518typedef struct VMCSCACHE
519{
520#ifdef VBOX_WITH_CRASHDUMP_MAGIC
521 /* Magic marker for searching in crash dumps. */
522 uint8_t aMagic[16];
523 uint64_t uMagic;
524 uint64_t u64TimeEntry;
525 uint64_t u64TimeSwitch;
526 uint64_t cResume;
527 uint64_t interPD;
528 uint64_t pSwitcher;
529 uint32_t uPos;
530 uint32_t idCpu;
531#endif
532 /* CR2 is saved here for EPT syncing. */
533 uint64_t cr2;
534 struct
535 {
536 uint32_t cValidEntries;
537 uint32_t uAlignment;
538 uint32_t aField[VMCSCACHE_MAX_ENTRY];
539 uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
540 } Write;
541 struct
542 {
543 uint32_t cValidEntries;
544 uint32_t uAlignment;
545 uint32_t aField[VMCSCACHE_MAX_ENTRY];
546 uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
547 } Read;
548#ifdef VBOX_STRICT
549 struct
550 {
551 RTHCPHYS HCPhysCpuPage;
552 RTHCPHYS HCPhysVmcs;
553 RTGCPTR pCache;
554 RTGCPTR pCtx;
555 } TestIn;
556 struct
557 {
558 RTHCPHYS HCPhysVmcs;
559 RTGCPTR pCache;
560 RTGCPTR pCtx;
561 uint64_t eflags;
562 uint64_t cr8;
563 } TestOut;
564 struct
565 {
566 uint64_t param1;
567 uint64_t param2;
568 uint64_t param3;
569 uint64_t param4;
570 } ScratchPad;
571#endif
572} VMCSCACHE;
573/** Pointer to VMCSCACHE. */
574typedef VMCSCACHE *PVMCSCACHE;
575AssertCompileSizeAlignment(VMCSCACHE, 8);
576
577/** VMX StartVM function. */
578typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
579/** Pointer to a VMX StartVM function. */
580typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;
581
582/** SVM VMRun function. */
583typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
584/** Pointer to a SVM VMRun function. */
585typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
586
587/**
588 * HM VMCPU Instance data.
589 *
590 * Note! If you change members of this struct, make sure to check if the
591 * assembly counterpart in HMInternal.mac needs to be updated as well.
592 */
593typedef struct HMCPU
594{
595 /** Set if we need to flush the TLB during the world switch. */
596 bool fForceTLBFlush;
597 /** Set when we're using VT-x or AMD-V at that moment. */
598 bool fActive;
599 /** Set when the TLB has been checked until we return from the world switch. */
600 volatile bool fCheckedTLBFlush;
601 /** Whether we're executing a single instruction. */
602 bool fSingleInstruction;
603 /** Set if we need to clear the trap flag because of single stepping. */
604 bool fClearTrapFlag;
605 /** Whether we've completed the inner HM leave function. */
606 bool fLeaveDone;
607 /** Whether we're using the hyper DR7 or guest DR7. */
608 bool fUsingHyperDR7;
609 /** Whether to preload the guest-FPU state to avoid #NM VM-exit overhead. */
610 bool fPreloadGuestFpu;
611 /** Set if XCR0 needs to be loaded and saved when entering and exiting guest
612 * code execution. */
613 bool fLoadSaveGuestXcr0;
614
615 /** Whether #UD needs to be intercepted (required by certain GIM providers). */
616 bool fGIMTrapXcptUD;
617 /** Whether paravirt. hypercalls are enabled. */
618 bool fHypercallsEnabled;
619 uint8_t u8Alignment0[5];
620
621 /** World switch exit counter. */
622 volatile uint32_t cWorldSwitchExits;
623 /** HM_CHANGED_* flags. */
624 volatile uint32_t fContextUseFlags;
625 /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first
626 * time). */
627 RTCPUID idLastCpu;
628 /** TLB flush count. */
629 uint32_t cTlbFlushes;
630 /** Current ASID in use by the VM. */
631 uint32_t uCurrentAsid;
632 /** An additional error code used for some gurus. */
633 uint32_t u32HMError;
634 /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
635 uint64_t u64HostTscAux;
636
637 struct
638 {
639 /** Ring 0 handlers for VT-x. */
640 PFNHMVMXSTARTVM pfnStartVM;
641#if HC_ARCH_BITS == 32
642 uint32_t u32Alignment0;
643#endif
644 /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
645 uint32_t u32PinCtls;
646 /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
647 uint32_t u32ProcCtls;
648 /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
649 uint32_t u32ProcCtls2;
650 /** Current VMX_VMCS32_CTRL_EXIT. */
651 uint32_t u32ExitCtls;
652 /** Current VMX_VMCS32_CTRL_ENTRY. */
653 uint32_t u32EntryCtls;
654
655 /** Current CR0 mask. */
656 uint32_t u32CR0Mask;
657 /** Current CR4 mask. */
658 uint32_t u32CR4Mask;
659 /** Current exception bitmap. */
660 uint32_t u32XcptBitmap;
661 /** The updated-guest-state mask. */
662 volatile uint32_t fUpdatedGuestState;
663 uint32_t u32Alignment1;
664
665 /** Physical address of the VM control structure (VMCS). */
666 RTHCPHYS HCPhysVmcs;
667 /** R0 memory object for the VM control structure (VMCS). */
668 RTR0MEMOBJ hMemObjVmcs;
669 /** Virtual address of the VM control structure (VMCS). */
670 R0PTRTYPE(void *) pvVmcs;
671
672 /** Physical address of the virtual APIC page for TPR caching. */
673 RTHCPHYS HCPhysVirtApic;
674 /** R0 memory object for the virtual APIC page for TPR caching. */
675 RTR0MEMOBJ hMemObjVirtApic;
676 /** Virtual address of the virtual APIC page for TPR caching. */
677 R0PTRTYPE(uint8_t *) pbVirtApic;
678
679 /** Physical address of the MSR bitmap. */
680 RTHCPHYS HCPhysMsrBitmap;
681 /** R0 memory object for the MSR bitmap. */
682 RTR0MEMOBJ hMemObjMsrBitmap;
683 /** Virtual address of the MSR bitmap. */
684 R0PTRTYPE(void *) pvMsrBitmap;
685
686 /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
687 * for guest MSRs). */
688 RTHCPHYS HCPhysGuestMsr;
689 /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
690 * (used for guest MSRs). */
691 RTR0MEMOBJ hMemObjGuestMsr;
692 /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
693 * for guest MSRs). */
694 R0PTRTYPE(void *) pvGuestMsr;
695
696 /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
697 RTHCPHYS HCPhysHostMsr;
698 /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
699 RTR0MEMOBJ hMemObjHostMsr;
700 /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
701 R0PTRTYPE(void *) pvHostMsr;
702
703 /** Current EPTP. */
704 RTHCPHYS HCPhysEPTP;
705
706 /** Number of guest/host MSR pairs in the auto-load/store area. */
707 uint32_t cMsrs;
708 /** Whether the host MSR values are up-to-date in the auto-load/store area. */
709 bool fUpdatedHostMsrs;
710 uint8_t u8Alignment0[3];
711
712 /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
713 uint64_t u64HostLStarMsr;
714 /** Host STAR MSR value to restore lazily while leaving VT-x. */
715 uint64_t u64HostStarMsr;
716 /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */
717 uint64_t u64HostSFMaskMsr;
718 /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */
719 uint64_t u64HostKernelGSBaseMsr;
720 /** A mask of which MSRs have been swapped and need restoration. */
721 uint32_t fLazyMsrs;
722 uint32_t u32Alignment2;
723
724 /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
725 uint64_t u64MsrApicBase;
727 /** Last used TSC offset value. (cached) */
727 uint64_t u64TSCOffset;
728
729 /** VMCS cache. */
730 VMCSCACHE VMCSCache;
731
732 /** Real-mode emulation state. */
733 struct
734 {
735 X86DESCATTR AttrCS;
736 X86DESCATTR AttrDS;
737 X86DESCATTR AttrES;
738 X86DESCATTR AttrFS;
739 X86DESCATTR AttrGS;
740 X86DESCATTR AttrSS;
741 X86EFLAGS Eflags;
742 uint32_t fRealOnV86Active;
743 } RealMode;
744
745 /** VT-x error-reporting (mainly for ring-3 propagation). */
746 struct
747 {
748 uint64_t u64VMCSPhys;
749 uint32_t u32VMCSRevision;
750 uint32_t u32InstrError;
751 uint32_t u32ExitReason;
752 RTCPUID idEnteredCpu;
753 RTCPUID idCurrentCpu;
754 uint32_t u32Alignment0;
755 } LastError;
756
757 /** Current state of the VMCS. */
758 uint32_t uVmcsState;
759 /** Which host-state bits to restore before being preempted. */
760 uint32_t fRestoreHostFlags;
761 /** The host-state restoration structure. */
762 VMXRESTOREHOST RestoreHost;
763
764 /** Set if guest was executing in real mode (extra checks). */
765 bool fWasInRealMode;
766 uint8_t u8Alignment1[7];
767 } vmx;
768
769 struct
770 {
771 /** Ring 0 handlers for AMD-V. */
772 PFNHMSVMVMRUN pfnVMRun;
773#if HC_ARCH_BITS == 32
774 uint32_t u32Alignment0;
775#endif
776
777 /** Physical address of the host VMCB which holds additional host-state. */
778 RTHCPHYS HCPhysVmcbHost;
779 /** R0 memory object for the host VMCB which holds additional host-state. */
780 RTR0MEMOBJ hMemObjVmcbHost;
781 /** Virtual address of the host VMCB which holds additional host-state. */
782 R0PTRTYPE(void *) pvVmcbHost;
783
784 /** Physical address of the guest VMCB. */
785 RTHCPHYS HCPhysVmcb;
786 /** R0 memory object for the guest VMCB. */
787 RTR0MEMOBJ hMemObjVmcb;
788 /** Virtual address of the guest VMCB. */
789 R0PTRTYPE(void *) pvVmcb;
790
791 /** Physical address of the MSR bitmap (8 KB). */
792 RTHCPHYS HCPhysMsrBitmap;
793 /** R0 memory object for the MSR bitmap (8 KB). */
794 RTR0MEMOBJ hMemObjMsrBitmap;
795 /** Virtual address of the MSR bitmap. */
796 R0PTRTYPE(void *) pvMsrBitmap;
797
798 /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
799 * we should check if the VTPR changed on every VM-exit. */
800 bool fSyncVTpr;
801 uint8_t u8Alignment0[7];
802 } svm;
803
804 /** Event injection state. */
805 struct
806 {
807 uint32_t fPending;
808 uint32_t u32ErrCode;
809 uint32_t cbInstr;
810 uint32_t u32Padding; /**< Explicit alignment padding. */
811 uint64_t u64IntInfo;
812 RTGCUINTPTR GCPtrFaultAddress;
813 } Event;
814
815 /** IO Block emulation state. */
816 struct
817 {
818 bool fEnabled;
819 uint8_t u8Align[7];
820
821 /** RIP at the start of the io code we wish to emulate in the recompiler. */
822 RTGCPTR GCPtrFunctionEip;
823
824 uint64_t cr0;
825 } EmulateIoBlock;
826
827 struct
828 {
829 /** Pending IO operation type. */
830 HMPENDINGIO enmType;
831 uint32_t u32Alignment0;
832 RTGCPTR GCPtrRip;
833 RTGCPTR GCPtrRipNext;
834 union
835 {
836 struct
837 {
838 uint32_t uPort;
839 uint32_t uAndVal;
840 uint32_t cbSize;
841 } Port;
842 uint64_t aRaw[2];
843 } s;
844 } PendingIO;
845
846 /** The PAE PDPEs used with Nested Paging (only valid when
847 * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
848 X86PDPE aPdpes[4];
849
850 /** Current shadow paging mode. */
851 PGMMODE enmShadowMode;
852
853 /** The CPU ID of the CPU currently owning the VMCS. Set in
854 * HMR0Enter and cleared in HMR0Leave. */
855 RTCPUID idEnteredCpu;
856
857 /** To keep track of pending TLB shootdown pages. (SMP guest only) */
858 struct
859 {
860 RTGCPTR aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
861 uint32_t cPages;
862 uint32_t u32Alignment0; /**< Explicit alignment padding. */
863 } TlbShootdown;
864
865 /** VT-x/AMD-V VM-exit/#VMEXIT history, circular array. */
866 uint16_t auExitHistory[31];
867 /** The index of the next free slot in the history array. */
868 uint16_t idxExitHistoryFree;
869
870 /** For saving stack space, the disassembler state is allocated here instead of
871 * on the stack. */
872 DISCPUSTATE DisState;
873
874 STAMPROFILEADV StatEntry;
875 STAMPROFILEADV StatExit1;
876 STAMPROFILEADV StatExit2;
877 STAMPROFILEADV StatExitIO;
878 STAMPROFILEADV StatExitMovCRx;
879 STAMPROFILEADV StatExitXcptNmi;
880 STAMPROFILEADV StatLoadGuestState;
881 STAMPROFILEADV StatInGC;
882
883#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
884 STAMPROFILEADV StatWorldSwitch3264;
885#endif
886 STAMPROFILEADV StatPoke;
887 STAMPROFILEADV StatSpinPoke;
888 STAMPROFILEADV StatSpinPokeFailed;
889
890 STAMCOUNTER StatInjectInterrupt;
891 STAMCOUNTER StatInjectXcpt;
892 STAMCOUNTER StatInjectPendingReflect;
893
894 STAMCOUNTER StatExitAll;
895 STAMCOUNTER StatExitShadowNM;
896 STAMCOUNTER StatExitGuestNM;
897 STAMCOUNTER StatExitShadowPF; /* Misleading, currently used for MMIO #PFs as well. */
898 STAMCOUNTER StatExitShadowPFEM;
899 STAMCOUNTER StatExitGuestPF;
900 STAMCOUNTER StatExitGuestUD;
901 STAMCOUNTER StatExitGuestSS;
902 STAMCOUNTER StatExitGuestNP;
903 STAMCOUNTER StatExitGuestTS;
904 STAMCOUNTER StatExitGuestGP;
905 STAMCOUNTER StatExitGuestDE;
906 STAMCOUNTER StatExitGuestDB;
907 STAMCOUNTER StatExitGuestMF;
908 STAMCOUNTER StatExitGuestBP;
909 STAMCOUNTER StatExitGuestXF;
910 STAMCOUNTER StatExitGuestXcpUnk;
911 STAMCOUNTER StatExitInvlpg;
912 STAMCOUNTER StatExitInvd;
913 STAMCOUNTER StatExitWbinvd;
914 STAMCOUNTER StatExitPause;
915 STAMCOUNTER StatExitCpuid;
916 STAMCOUNTER StatExitRdtsc;
917 STAMCOUNTER StatExitRdtscp;
918 STAMCOUNTER StatExitRdpmc;
919 STAMCOUNTER StatExitVmcall;
920 STAMCOUNTER StatExitRdrand;
921 STAMCOUNTER StatExitCli;
922 STAMCOUNTER StatExitSti;
923 STAMCOUNTER StatExitPushf;
924 STAMCOUNTER StatExitPopf;
925 STAMCOUNTER StatExitIret;
926 STAMCOUNTER StatExitInt;
927 STAMCOUNTER StatExitCRxWrite[16];
928 STAMCOUNTER StatExitCRxRead[16];
929 STAMCOUNTER StatExitDRxWrite;
930 STAMCOUNTER StatExitDRxRead;
931 STAMCOUNTER StatExitRdmsr;
932 STAMCOUNTER StatExitWrmsr;
933 STAMCOUNTER StatExitClts;
934 STAMCOUNTER StatExitXdtrAccess;
935 STAMCOUNTER StatExitHlt;
936 STAMCOUNTER StatExitMwait;
937 STAMCOUNTER StatExitMonitor;
938 STAMCOUNTER StatExitLmsw;
939 STAMCOUNTER StatExitIOWrite;
940 STAMCOUNTER StatExitIORead;
941 STAMCOUNTER StatExitIOStringWrite;
942 STAMCOUNTER StatExitIOStringRead;
943 STAMCOUNTER StatExitIntWindow;
944 STAMCOUNTER StatExitExtInt;
945 STAMCOUNTER StatExitHostNmiInGC;
946 STAMCOUNTER StatExitPreemptTimer;
947 STAMCOUNTER StatExitTprBelowThreshold;
948 STAMCOUNTER StatExitTaskSwitch;
949 STAMCOUNTER StatExitMtf;
950 STAMCOUNTER StatExitApicAccess;
951 STAMCOUNTER StatPendingHostIrq;
952
953 STAMCOUNTER StatPreemptPreempting;
954 STAMCOUNTER StatPreemptSaveHostState;
955
956 STAMCOUNTER StatFlushPage;
957 STAMCOUNTER StatFlushPageManual;
958 STAMCOUNTER StatFlushPhysPageManual;
959 STAMCOUNTER StatFlushTlb;
960 STAMCOUNTER StatFlushTlbManual;
961 STAMCOUNTER StatFlushTlbWorldSwitch;
962 STAMCOUNTER StatNoFlushTlbWorldSwitch;
963 STAMCOUNTER StatFlushEntire;
964 STAMCOUNTER StatFlushAsid;
965 STAMCOUNTER StatFlushNestedPaging;
966 STAMCOUNTER StatFlushTlbInvlpgVirt;
967 STAMCOUNTER StatFlushTlbInvlpgPhys;
968 STAMCOUNTER StatTlbShootdown;
969 STAMCOUNTER StatTlbShootdownFlush;
970
971 STAMCOUNTER StatSwitchGuestIrq;
972 STAMCOUNTER StatSwitchHmToR3FF;
973 STAMCOUNTER StatSwitchExitToR3;
974 STAMCOUNTER StatSwitchLongJmpToR3;
975 STAMCOUNTER StatSwitchMaxResumeLoops;
976 STAMCOUNTER StatSwitchHltToR3;
977 STAMCOUNTER StatSwitchApicAccessToR3;
978
979 STAMCOUNTER StatTscParavirt;
980 STAMCOUNTER StatTscOffset;
981 STAMCOUNTER StatTscIntercept;
982
983 STAMCOUNTER StatExitReasonNpf;
984 STAMCOUNTER StatDRxArmed;
985 STAMCOUNTER StatDRxContextSwitch;
986 STAMCOUNTER StatDRxIoCheck;
987
988 STAMCOUNTER StatLoadMinimal;
989 STAMCOUNTER StatLoadFull;
990
991 STAMCOUNTER StatVmxCheckBadRmSelBase;
992 STAMCOUNTER StatVmxCheckBadRmSelLimit;
993 STAMCOUNTER StatVmxCheckRmOk;
994
995 STAMCOUNTER StatVmxCheckBadSel;
996 STAMCOUNTER StatVmxCheckBadRpl;
997 STAMCOUNTER StatVmxCheckBadLdt;
998 STAMCOUNTER StatVmxCheckBadTr;
999 STAMCOUNTER StatVmxCheckPmOk;
1000
1001#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1002 STAMCOUNTER StatFpu64SwitchBack;
1003 STAMCOUNTER StatDebug64SwitchBack;
1004#endif
1005
1006#ifdef VBOX_WITH_STATISTICS
1007 R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
1008 R0PTRTYPE(PSTAMCOUNTER) paStatExitReasonR0;
1009 R3PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqs;
1010 R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0;
1011#endif
1012#ifdef HM_PROFILE_EXIT_DISPATCH
1013 STAMPROFILEADV StatExitDispatch;
1014#endif
1015} HMCPU;
1016/** Pointer to HM VMCPU instance data. */
1017typedef HMCPU *PHMCPU;
1018AssertCompileMemberAlignment(HMCPU, vmx, 8);
1019AssertCompileMemberAlignment(HMCPU, svm, 8);
1020AssertCompileMemberAlignment(HMCPU, Event, 8);
1021
1022
1023#ifdef IN_RING0
1024VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void);
1025VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);
1026
1027
1028# ifdef VBOX_STRICT
1029VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
1030VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
1031# else
1032# define HMDumpRegs(a, b, c) do { } while (0)
1033# define HMR0DumpDescriptor(a, b, c) do { } while (0)
1034# endif /* VBOX_STRICT */
1035
1036# ifdef VBOX_WITH_KERNEL_USING_XMM
1037DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
1038DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
1039# endif
1040
1041# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1042/**
1043 * Gets 64-bit GDTR and IDTR on darwin.
1044 * @param pGdtr Where to store the 64-bit GDTR.
1045 * @param pIdtr Where to store the 64-bit IDTR.
1046 */
1047DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
1048
1049/**
1050 * Gets 64-bit CR3 on darwin.
1051 * @returns CR3
1052 */
1053DECLASM(uint64_t) HMR0Get64bitCR3(void);
1054# endif /* VBOX_WITH_HYBRID_32BIT_KERNEL */
1055
1056#endif /* IN_RING0 */
1057
1058/** @} */
1059
1060RT_C_DECLS_END
1061
1062#endif
1063