VirtualBox

source: vbox/trunk/src/VBox/VMM/VBoxVMM.d@ 89088

Last change on this file since 89088 was 82968, checked in by vboxsync, 5 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.8 KB
Line 
1/* $Id: VBoxVMM.d 82968 2020-02-04 10:35:17Z vboxsync $ */
2/** @file
3 * VBoxVMM - Static dtrace probes.
4 */
5
6/*
7 * Copyright (C) 2009-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18provider vboxvmm
19{
20 probe em__state__changed(struct VMCPU *a_pVCpu, int a_enmOldState, int a_enmNewState, int a_rc);
21 /*^^VMM-ALT-TP: "%d -> %d (rc=%d)", a_enmOldState, a_enmNewState, a_rc */
22
23 probe em__state__unchanged(struct VMCPU *a_pVCpu, int a_enmState, int a_rc);
24 /*^^VMM-ALT-TP: "%d (rc=%d)", a_enmState, a_rc */
25
26 probe em__raw__run__pre(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
27 /*^^VMM-ALT-TP: "%04x:%08llx", (a_pCtx)->cs, (a_pCtx)->rip */
28
29 probe em__raw__run__ret(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, int a_rc);
30 /*^^VMM-ALT-TP: "%04x:%08llx rc=%d", (a_pCtx)->cs, (a_pCtx)->rip, (a_rc) */
31
32 probe em__ff__high(struct VMCPU *a_pVCpu, uint32_t a_fGlobal, uint64_t a_fLocal, int a_rc);
33 /*^^VMM-ALT-TP: "vm=%#x cpu=%#x rc=%d", (a_fGlobal), (a_fLocal), (a_rc) */
34
35 probe em__ff__all(struct VMCPU *a_pVCpu, uint32_t a_fGlobal, uint64_t a_fLocal, int a_rc);
36 /*^^VMM-ALT-TP: "vm=%#x cpu=%#x rc=%d", (a_fGlobal), (a_fLocal), (a_rc) */
37
38 probe em__ff__all__ret(struct VMCPU *a_pVCpu, int a_rc);
39 /*^^VMM-ALT-TP: "%d", (a_rc) */
40
41 probe em__ff__raw(struct VMCPU *a_pVCpu, uint32_t a_fGlobal, uint64_t a_fLocal);
42 /*^^VMM-ALT-TP: "vm=%#x cpu=%#x", (a_fGlobal), (a_fLocal) */
43
44 probe em__ff__raw_ret(struct VMCPU *a_pVCpu, int a_rc);
45 /*^^VMM-ALT-TP: "%d", (a_rc) */
46
47 probe pdm__irq__get( struct VMCPU *a_pVCpu, uint32_t a_uTag, uint32_t a_idSource, uint32_t a_iIrq);
48 probe pdm__irq__high(struct VMCPU *a_pVCpu, uint32_t a_uTag, uint32_t a_idSource);
49 probe pdm__irq__low( struct VMCPU *a_pVCpu, uint32_t a_uTag, uint32_t a_idSource);
50 probe pdm__irq__hilo(struct VMCPU *a_pVCpu, uint32_t a_uTag, uint32_t a_idSource);
51
52
53 probe r0__gvmm__vm__created(void *a_pGVM, void *a_pVM, uint32_t a_Pid, void *a_hEMT0, uint32_t a_cCpus);
54 probe r0__hmsvm__vmexit(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint64_t a_ExitCode, struct SVMVMCB *a_pVmcb);
55 probe r0__hmvmx__vmexit(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint64_t a_ExitReason, uint64_t a_ExitQualification);
56 probe r0__hmvmx__vmexit__noctx(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pIncompleteCtx, uint64_t a_ExitReason);
57
58 probe r0__vmm__return__to__ring3__rc(struct VMCPU *a_pVCpu, struct CPUMCTX *p_Ctx, int a_rc);
59 probe r0__vmm__return__to__ring3__hm(struct VMCPU *a_pVCpu, struct CPUMCTX *p_Ctx, int a_rc);
60 probe r0__vmm__return__to__ring3__nem(struct VMCPU *a_pVCpu, struct CPUMCTX *p_Ctx, int a_rc);
61
62
63 /** @name CPU Exception probes
64 * These probes will intercept guest CPU exceptions as best we
65 * can. In some execution modes some of these probes may also
66 * see non-guest exceptions as we don't try distiguish between
67 * virtualization and guest exceptions before firing the probes.
68 *
69 * Using these probes may have a performance impact on guest
70 * activities involving lots of exceptions.
71 * @{
72 */
73 /** \#DE - integer divide error. */
74 probe xcpt__de(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
75 /** \#DB - debug fault / trap. */
76 probe xcpt__db(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint64_t a_dr6);
77 /** \#BP - breakpoint (INT3). */
78 probe xcpt__bp(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
79 /** \#OF - overflow (INTO). */
80 probe xcpt__of(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
81 /** \#BR - bound range exceeded (BOUND). */
82 probe xcpt__br(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
83 /** \#UD - undefined opcode. */
84 probe xcpt__ud(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
85 /** \#NM - FPU not avaible and more. */
86 probe xcpt__nm(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
87 /** \#DF - double fault. */
88 probe xcpt__df(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
89 /** \#TS - TSS related fault. */
90 probe xcpt__ts(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t a_uErr);
91 /** \#NP - segment not present. */
92 probe xcpt__np(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t a_uErr);
93 /** \#SS - stack segment fault. */
94 probe xcpt__ss(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t a_uErr);
95 /** \#GP - general protection fault. */
96 probe xcpt__gp(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t a_uErr);
97 /** \#PF - page fault. */
98 probe xcpt__pf(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t a_uErr, uint64_t a_cr2);
99 /** \#MF - math fault (FPU). */
100 probe xcpt__mf(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
101 /** \#AC - alignment check. */
102 probe xcpt__ac(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
103 /** \#XF - SIMD floating point exception. */
104 probe xcpt__xf(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
105 /** \#VE - virtualization exception. */
106 probe xcpt__ve(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
107 /** \#SX - security exception. */
108 probe xcpt__sx(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t a_uErr);
109 /** @} */
110
111
112 /** Software interrupt (INT XXh).
113 * It may be very difficult to implement this probe when using hardware
114 * virtualization, so maybe we have to drop it... */
115 probe int__software(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint8_t a_iInterrupt);
116 /** Hardware interrupt being dispatched.
117 *
118 * Relates to pdm__irq__get ...
119 */
120 probe int__hardware(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint8_t a_iInterrupt, uint32_t a_uTag, uint32_t a_idSource);
121
122 /** @name Instruction probes
123 * These are instructions normally related to VM exits. These
124 * probes differs from the exit probes in that we will try make
125 * these instructions cause exits and fire the probe whenever
126 * they are executed by the guest. This means some of these
127 * probes will have a noticable performance impact (like
128 * instr__pause).
129 * @{ */
130 /** Instruction: HALT */
131 probe instr__halt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
132 /** Instruction: MWAIT */
133 probe instr__mwait(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
134 /** Instruction: MONITOR */
135 probe instr__monitor(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
136 /** Instruction: CPUID instruction (missing stuff in raw-mode). */
137 probe instr__cpuid(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t uLeaf, uint32_t uSubLeaf);
138 /** Instruction: INVD */
139 probe instr__invd(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
140 /** Instruction: WBINVD */
141 probe instr__wbinvd(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
142 /** Instruction: INVLPG */
143 probe instr__invlpg(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
144 /** Instruction: RDTSC */
145 probe instr__rdtsc(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
146 /** Instruction: RDTSCP */
147 probe instr__rdtscp(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
148 /** Instruction: RDPMC */
149 probe instr__rdpmc(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
150 /** Instruction: RDMSR */
151 probe instr__rdmsr(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t a_idMsr);
152 /** Instruction: WRMSR */
153 probe instr__wrmsr(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t a_idMsr, uint64_t a_uValue);
154 /** Instruction: CRx read instruction (missing smsw in raw-mode,
155 * and reads in general in VT-x). */
156 probe instr__crx__read(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint8_t a_iReg);
157 /** Instruction: CRx write instruction. */
158 probe instr__crx__write(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint8_t a_iReg);
159 /** Instruction: DRx read instruction. */
160 probe instr__drx__read(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint8_t a_iReg);
161 /** Instruction: DRx write instruction. */
162 probe instr__drx__write(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint8_t a_iReg);
163 /** Instruction: PAUSE instruction (not in raw-mode). */
164 probe instr__pause(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
165 /** Instruction: XSETBV */
166 probe instr__xsetbv(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
167 /** Instruction: SIDT */
168 probe instr__sidt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
169 /** Instruction: LIDT */
170 probe instr__lidt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
171 /** Instruction: SGDT */
172 probe instr__sgdt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
173 /** Instruction: LGDT */
174 probe instr__lgdt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
175 /** Instruction: SLDT */
176 probe instr__sldt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
177 /** Instruction: LLDT */
178 probe instr__lldt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
179 /** Instruction: STR */
180 probe instr__str(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
181 /** Instruction: LTR */
182 probe instr__ltr(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
183 /** Instruction: GETSEC */
184 probe instr__getsec(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
185 /** Instruction: RSM */
186 probe instr__rsm(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
187 /** Instruction: RDRAND */
188 probe instr__rdrand(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
189 /** Instruction: RDSEED */
190 probe instr__rdseed(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
191 /** Instruction: XSAVES */
192 probe instr__xsaves(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
193 /** Instruction: XRSTORS */
194 probe instr__xrstors(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
195 /** Instruction: VMCALL (intel) or VMMCALL (AMD) instruction. */
196 probe instr__vmm__call(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
197
198 /** Instruction: VT-x VMCLEAR instruction. */
199 probe instr__vmx__vmclear(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
200 /** Instruction: VT-x VMLAUNCH */
201 probe instr__vmx__vmlaunch(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
202 /** Instruction: VT-x VMPTRLD */
203 probe instr__vmx__vmptrld(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
204 /** Instruction: VT-x VMPTRST */
205 probe instr__vmx__vmptrst(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
206 /** Instruction: VT-x VMREAD */
207 probe instr__vmx__vmread(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
208 /** Instruction: VT-x VMRESUME */
209 probe instr__vmx__vmresume(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
210 /** Instruction: VT-x VMWRITE */
211 probe instr__vmx__vmwrite(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
212 /** Instruction: VT-x VMXOFF */
213 probe instr__vmx__vmxoff(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
214 /** Instruction: VT-x VMXON */
215 probe instr__vmx__vmxon(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
216 /** Instruction: VT-x VMFUNC */
217 probe instr__vmx__vmfunc(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
218 /** Instruction: VT-x INVEPT */
219 probe instr__vmx__invept(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
220 /** Instruction: VT-x INVVPID */
221 probe instr__vmx__invvpid(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
222 /** Instruction: VT-x INVPCID */
223 probe instr__vmx__invpcid(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
224
225 /** Instruction: AMD-V VMRUN */
226 probe instr__svm__vmrun(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
227 /** Instruction: AMD-V VMLOAD */
228 probe instr__svm__vmload(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
229 /** Instruction: AMD-V VMSAVE */
230 probe instr__svm__vmsave(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
231 /** Instruction: AMD-V STGI */
232 probe instr__svm__stgi(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
233 /** Instruction: AMD-V CLGI */
234 probe instr__svm__clgi(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
235 /** @} */
236
237
238 /** @name VM exit probes
239 * These are named exits with (in some cases at least) useful
240 * information as arguments. Unlike the instruction probes,
241 * these will not change the number of VM exits and have much
242 * less of an impact on VM performance.
243 * @{ */
244 /** VM Exit: Task switch. */
245 probe exit__task__switch(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
246 /** VM Exit: HALT instruction.
247 * @todo not yet implemented. */
248 probe exit__halt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
249 /** VM Exit: MWAIT instruction. */
250 probe exit__mwait(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
251 /** VM Exit: MONITOR instruction. */
252 probe exit__monitor(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
253 /** VM Exit: CPUID instruction (missing stuff in raw-mode). */
254 probe exit__cpuid(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t uLeaf, uint32_t uSubLeaf);
255 /** VM Exit: INVD instruction. */
256 probe exit__invd(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
257 /** VM Exit: WBINVD instruction. */
258 probe exit__wbinvd(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
259 /** VM Exit: INVLPG instruction. */
260 probe exit__invlpg(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
261 /** VM Exit: RDTSC instruction. */
262 probe exit__rdtsc(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
263 /** VM Exit: RDTSCP instruction. */
264 probe exit__rdtscp(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
265 /** VM Exit: RDPMC instruction. */
266 probe exit__rdpmc(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
267 /** VM Exit: RDMSR instruction. */
268 probe exit__rdmsr(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t a_idMsr);
269 /** VM Exit: WRMSR instruction. */
270 probe exit__wrmsr(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint32_t a_idMsr, uint64_t a_uValue);
271 /** VM Exit: CRx read instruction (missing smsw in raw-mode,
272 * and reads in general in VT-x). */
273 probe exit__crx__read(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint8_t a_iReg);
274 /** VM Exit: CRx write instruction. */
275 probe exit__crx__write(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint8_t a_iReg);
276 /** VM Exit: DRx read instruction. */
277 probe exit__drx__read(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint8_t a_iReg);
278 /** VM Exit: DRx write instruction. */
279 probe exit__drx__write(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx, uint8_t a_iReg);
280 /** VM Exit: PAUSE instruction (not in raw-mode). */
281 probe exit__pause(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
282 /** VM Exit: XSETBV instruction. */
283 probe exit__xsetbv(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
284 /** VM Exit: SIDT instruction. */
285 probe exit__sidt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
286 /** VM Exit: LIDT instruction. */
287 probe exit__lidt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
288 /** VM Exit: SGDT instruction. */
289 probe exit__sgdt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
290 /** VM Exit: LGDT instruction. */
291 probe exit__lgdt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
292 /** VM Exit: SLDT instruction. */
293 probe exit__sldt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
294 /** VM Exit: LLDT instruction. */
295 probe exit__lldt(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
296 /** VM Exit: STR instruction. */
297 probe exit__str(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
298 /** VM Exit: LTR instruction. */
299 probe exit__ltr(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
300 /** VM Exit: GETSEC instruction. */
301 probe exit__getsec(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
302 /** VM Exit: RSM instruction. */
303 probe exit__rsm(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
304 /** VM Exit: RDRAND instruction. */
305 probe exit__rdrand(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
306 /** VM Exit: RDSEED instruction. */
307 probe exit__rdseed(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
308 /** VM Exit: XSAVES instruction. */
309 probe exit__xsaves(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
310 /** VM Exit: XRSTORS instruction. */
311 probe exit__xrstors(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
312 /** VM Exit: VMCALL (intel) or VMMCALL (AMD) instruction. */
313 probe exit__vmm__call(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
314
315 /** VM Exit: VT-x VMCLEAR instruction. */
316 probe exit__vmx__vmclear(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
317 /** VM Exit: VT-x VMLAUNCH instruction. */
318 probe exit__vmx__vmlaunch(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
319 /** VM Exit: VT-x VMPTRLD instruction. */
320 probe exit__vmx__vmptrld(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
321 /** VM Exit: VT-x VMPTRST instruction. */
322 probe exit__vmx__vmptrst(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
323 /** VM Exit: VT-x VMREAD instruction. */
324 probe exit__vmx__vmread(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
325 /** VM Exit: VT-x VMRESUME instruction. */
326 probe exit__vmx__vmresume(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
327 /** VM Exit: VT-x VMWRITE instruction. */
328 probe exit__vmx__vmwrite(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
329 /** VM Exit: VT-x VMXOFF instruction. */
330 probe exit__vmx__vmxoff(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
331 /** VM Exit: VT-x VMXON instruction. */
332 probe exit__vmx__vmxon(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
333 /** VM Exit: VT-x VMFUNC instruction. */
334 probe exit__vmx__vmfunc(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
335 /** VM Exit: VT-x INVEPT instruction. */
336 probe exit__vmx__invept(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
337 /** VM Exit: VT-x INVVPID instruction. */
338 probe exit__vmx__invvpid(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
339 /** VM Exit: VT-x INVPCID instruction. */
340 probe exit__vmx__invpcid(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
341 /** VM Exit: VT-x EPT violation. */
342 probe exit__vmx__ept__violation(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
343 /** VM Exit: VT-x EPT misconfiguration. */
344 probe exit__vmx__ept__misconfig(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
345 /** VM Exit: VT-x Virtual APIC page access. */
346 probe exit__vmx__vapic__access(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
347 /** VM Exit: VT-x Virtual APIC page write needing virtualizing. */
348 probe exit__vmx__vapic__write(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
349
350 /** VM Exit: AMD-V VMRUN instruction. */
351 probe exit__svm__vmrun(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
352 /** VM Exit: AMD-V VMLOAD instruction. */
353 probe exit__svm__vmload(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
354 /** VM Exit: AMD-V VMSAVE instruction. */
355 probe exit__svm__vmsave(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
356 /** VM Exit: AMD-V STGI instruction. */
357 probe exit__svm__stgi(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
358 /** VM Exit: AMD-V CLGI instruction. */
359 probe exit__svm__clgi(struct VMCPU *a_pVCpu, struct CPUMCTX *a_pCtx);
360 /** @} */
361
362
363 /** @name IPRT tracepoints we link in.
364 * @{ */
365 probe iprt__critsect__entered(void *a_pvCritSect, const char *a_pszLaterNm, int32_t a_cLockers, uint32_t a_cNestings);
366 probe iprt__critsect__leaving(void *a_pvCritSect, const char *a_pszLaterNm, int32_t a_cLockers, uint32_t a_cNestings);
367 probe iprt__critsect__waiting(void *a_pvCritSect, const char *a_pszLaterNm, int32_t a_cLockers, void *a_pvNativeThreadOwner);
368 probe iprt__critsect__busy( void *a_pvCritSect, const char *a_pszLaterNm, int32_t a_cLockers, void *a_pvNativeThreadOwner);
369
370 probe iprt__critsectrw__excl_entered(void *a_pvCritSect, const char *a_pszLaterNm, uint32_t a_cNestings,
371 uint32_t a_cWaitingReaders, uint32_t cWriters);
372 probe iprt__critsectrw__excl_leaving(void *a_pvCritSect, const char *a_pszLaterNm, uint32_t a_cNestings,
373 uint32_t a_cWaitingReaders, uint32_t cWriters);
374 probe iprt__critsectrw__excl_waiting(void *a_pvCritSect, const char *a_pszLaterNm, uint8_t a_fWriteMode, uint32_t a_cWaitingReaders,
375 uint32_t a_cReaders, uint32_t a_cWriters, void *a_pvNativeOwnerThread);
376 probe iprt__critsectrw__excl_busy( void *a_pvCritSect, const char *a_pszLaterNm, uint8_t a_fWriteMode, uint32_t a_cWaitingReaders,
377 uint32_t a_cReaders, uint32_t a_cWriters, void *a_pvNativeOwnerThread);
378 probe iprt__critsectrw__excl_entered_shared(void *a_pvCritSect, const char *a_pszLaterNm, uint32_t a_cNestings,
379 uint32_t a_cWaitingReaders, uint32_t a_cWriters);
380 probe iprt__critsectrw__excl_leaving_shared(void *a_pvCritSect, const char *a_pszLaterNm, uint32_t a_cNestings,
381 uint32_t a_cWaitingReaders, uint32_t a_cWriters);
382 probe iprt__critsectrw__shared_entered(void *a_pvCritSect, const char *a_pszLaterNm, uint32_t a_cReaders, uint32_t a_cNestings);
383 probe iprt__critsectrw__shared_leaving(void *a_pvCritSect, const char *a_pszLaterNm, uint32_t a_cReaders, uint32_t a_cNestings);
384 probe iprt__critsectrw__shared_waiting(void *a_pvCritSect, const char *a_pszLaterNm, void *a_pvNativeThreadOwner,
385 uint32_t cWaitingReaders, uint32_t cWriters);
386 probe iprt__critsectrw__shared_busy( void *a_pvCritSect, const char *a_pszLaterNm, void *a_pvNativeThreadOwner,
387 uint32_t a_cWaitingReaders, uint32_t a_cWriters);
388
389 /** @} */
390};
391
392#pragma D attributes Evolving/Evolving/Common provider vboxvmm provider
393#pragma D attributes Private/Private/Unknown provider vboxvmm module
394#pragma D attributes Private/Private/Unknown provider vboxvmm function
395#pragma D attributes Evolving/Evolving/Common provider vboxvmm name
396#pragma D attributes Evolving/Evolving/Common provider vboxvmm args
397
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette