; $Id: VMMR0JmpA-x86.asm 76553 2019-01-01 01:45:53Z vboxsync $
;; @file
; VMM - R0 SetJmp / LongJmp routines for X86.
;

;
; Copyright (C) 2006-2019 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "VBox/err.mac"
%include "VBox/param.mac"


;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************
%define RESUME_MAGIC    07eadf00dh
%define STACK_PADDING   0eeeeeeeeh


; For vmmR0LoggerWrapper. (The other architecture(s) use(s) C99 variadic macros.)
extern NAME(RTLogLogger)


BEGINCODE


;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
; in the middle of a ring-3 call.  Another difference is the function pointer and
; argument.  This has to do with resuming code and the stack frame of the caller.
;
; @returns  VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[esp+0x04]  Our jmp_buf.
; @param    pfn      msc:rdx gcc:rsi x86:[esp+0x08]  The function to be called when not resuming.
; @param    pvUser1  msc:r8  gcc:rdx x86:[esp+0x0c]  The argument of that function.
; @param    pvUser2  msc:r9  gcc:rcx x86:[esp+0x10]  The argument of that function.
;
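; A minimal C-level sketch of the calling pattern (illustrative only; the
; worker and variable names are hypothetical):
;
;   VMMR0JMPBUF JmpBuf;   /* zeroed at init, so fInRing3Call starts clear */
;   int rc = vmmR0CallRing3SetJmp(&JmpBuf, pfnWorker, pvUser1, pvUser2);
;   /* On a fresh call, pfnWorker(pvUser1, pvUser2) runs and rc is either its
;      return value or the code handed to vmmR0CallRing3LongJmp if the worker
;      bailed out to ring-3.  In the latter case fInRing3Call is left set, and
;      invoking this function again with the same JmpBuf resumes the
;      interrupted worker instead of calling pfnWorker anew. */
;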
BEGINPROC vmmR0CallRing3SetJmp
GLOBALNAME vmmR0CallRing3SetJmp2
GLOBALNAME vmmR0CallRing3SetJmpEx
        ;
        ; Save the registers.
        ;
        mov     edx, [esp + 4h]             ; pJmpBuf
        mov     [xDX + VMMR0JMPBUF.ebx], ebx
        mov     [xDX + VMMR0JMPBUF.esi], esi
        mov     [xDX + VMMR0JMPBUF.edi], edi
        mov     [xDX + VMMR0JMPBUF.ebp], ebp
        mov     xAX, [esp]
        mov     [xDX + VMMR0JMPBUF.eip], xAX
        lea     ecx, [esp + 4]              ; (used in resume)
        mov     [xDX + VMMR0JMPBUF.esp], ecx
        pushf
        pop     xAX
        mov     [xDX + VMMR0JMPBUF.eflags], xAX

        ;
        ; If we're not in a ring-3 call, call pfn and return.
        ;
        test    byte [xDX + VMMR0JMPBUF.fInRing3Call], 1
        jnz     .resume

        mov     ebx, edx                    ; pJmpBuf -> ebx (persistent reg)
%ifdef VMM_R0_SWITCH_STACK
        mov     esi, [ebx + VMMR0JMPBUF.pvSavedStack]
        test    esi, esi
        jz      .entry_error
 %ifdef VBOX_STRICT
        cmp     dword [esi], 0h
        jne     .entry_error
        mov     edx, esi
        mov     edi, esi
        mov     ecx, VMM_STACK_SIZE / 4
        mov     eax, STACK_PADDING
        rep stosd
 %endif
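        ; The fill above turns the whole saved stack into a high-water mark:
        ; the strict exit path below scans for the first dword that no longer
        ; reads STACK_PADDING to measure how much stack was actually used.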
        lea     esi, [esi + VMM_STACK_SIZE - 32]
        mov     [esi + 1ch], dword 0deadbeefh   ; Marker 1.
        mov     [esi + 18h], ebx                ; Save pJmpBuf pointer.
        mov     [esi + 14h], dword 00c00ffeeh   ; Marker 2.
        mov     [esi + 10h], dword 0f00dbeefh   ; Marker 3.
        mov     edx, [esp + 10h]            ; pvArg2
        mov     ecx, [esp + 0ch]            ; pvArg1
        mov     eax, [esp + 08h]            ; pfn
 %if 1 ; Flip to 0 to eat up some extra stack - handy for finding paths using lots of stack.
  %define FRAME_OFFSET 0
 %else
  %define FRAME_OFFSET 1024
 %endif
        mov     [esi - FRAME_OFFSET + 04h], edx
        mov     [esi - FRAME_OFFSET      ], ecx
        lea     esp, [esi - FRAME_OFFSET]   ; Switch stack!
        call    eax
        and     dword [esi + 1ch], byte 0   ; reset marker.

 %ifdef VBOX_STRICT
        ; Calc stack usage and check for overflows.
        mov     edi, [ebx + VMMR0JMPBUF.pvSavedStack]
        cmp     dword [edi], STACK_PADDING  ; Check for obvious stack overflow.
        jne     .stack_overflow
        mov     esi, eax                    ; save eax
        mov     eax, STACK_PADDING
        mov     ecx, VMM_STACK_SIZE / 4
        cld
        repe scasd
        shl     ecx, 2                      ; *4
        cmp     ecx, VMM_STACK_SIZE - 64    ; Less than 64 bytes left -> overflow as well.
        mov     eax, esi                    ; restore eax in case of overflow (esi remains used)
        jae     .stack_overflow_almost

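        ; Statistics: ecx now holds the stack high-water mark in bytes.  Track
        ; the maximum in cbUsedMax and keep a running average, cbUsedAvg =
        ; cbUsedTotal / cUsedTotal, rebasing the totals from the current
        ; average before the counters grow large enough to cause div errors.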
        ; Update stack usage statistics.
        cmp     ecx, [ebx + VMMR0JMPBUF.cbUsedMax] ; New max usage?
        jle     .no_used_max
        mov     [ebx + VMMR0JMPBUF.cbUsedMax], ecx
.no_used_max:
        ; To simplify the average stuff, just historize before we hit div errors.
        inc     dword [ebx + VMMR0JMPBUF.cUsedTotal]
        test    [ebx + VMMR0JMPBUF.cUsedTotal], dword 0c0000000h
        jz      .no_historize
        mov     dword [ebx + VMMR0JMPBUF.cUsedTotal], 2
        mov     edi, [ebx + VMMR0JMPBUF.cbUsedAvg]
        mov     [ebx + VMMR0JMPBUF.cbUsedTotal], edi
        mov     dword [ebx + VMMR0JMPBUF.cbUsedTotal + 4], 0
.no_historize:
        add     [ebx + VMMR0JMPBUF.cbUsedTotal], ecx
        adc     dword [ebx + VMMR0JMPBUF.cbUsedTotal + 4], 0
        mov     eax, [ebx + VMMR0JMPBUF.cbUsedTotal]
        mov     edx, [ebx + VMMR0JMPBUF.cbUsedTotal + 4]
        mov     edi, [ebx + VMMR0JMPBUF.cUsedTotal]
        div     edi
        mov     [ebx + VMMR0JMPBUF.cbUsedAvg], eax

        mov     eax, esi                    ; restore eax (final, esi released)

        mov     edi, [ebx + VMMR0JMPBUF.pvSavedStack]
        mov     dword [edi], 0h             ; Reset the overflow marker.
 %endif ; VBOX_STRICT

%else  ; !VMM_R0_SWITCH_STACK
        mov     ecx, [esp + 0ch]            ; pvArg1
        mov     edx, [esp + 10h]            ; pvArg2
        mov     eax, [esp + 08h]            ; pfn
        sub     esp, 12                     ; align the stack on a 16-byte boundary.
        mov     [esp      ], ecx
        mov     [esp + 04h], edx
        call    eax
%endif ; !VMM_R0_SWITCH_STACK
        mov     edx, ebx                    ; pJmpBuf -> edx (volatile reg)

        ;
        ; Return like in the long jump but clear eip, no short cuts here.
        ;
.proper_return:
        mov     ebx, [xDX + VMMR0JMPBUF.ebx]
        mov     esi, [xDX + VMMR0JMPBUF.esi]
        mov     edi, [xDX + VMMR0JMPBUF.edi]
        mov     ebp, [xDX + VMMR0JMPBUF.ebp]
        mov     xCX, [xDX + VMMR0JMPBUF.eip]
        and     dword [xDX + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
        mov     esp, [xDX + VMMR0JMPBUF.esp]
        push    dword [xDX + VMMR0JMPBUF.eflags]
        popf
        jmp     xCX

.entry_error:
        mov     eax, VERR_VMM_SET_JMP_ERROR
        jmp     .proper_return

.stack_overflow:
        mov     eax, VERR_VMM_SET_JMP_STACK_OVERFLOW
        mov     edx, ebx
        jmp     .proper_return

.stack_overflow_almost:
        mov     eax, VERR_VMM_SET_JMP_STACK_OVERFLOW
        mov     edx, ebx
        jmp     .proper_return

        ;
        ; Aborting resume.
        ;
.bad:
        and     dword [xDX + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
        mov     edi, [xDX + VMMR0JMPBUF.edi]
        mov     esi, [xDX + VMMR0JMPBUF.esi]
        mov     ebx, [xDX + VMMR0JMPBUF.ebx]
        mov     eax, VERR_VMM_SET_JMP_ABORTED_RESUME
        ret

        ;
        ; Resume the VMMRZCallRing3 call.
        ;
.resume:
        ; Sanity checks.
%ifdef VMM_R0_SWITCH_STACK
        mov     eax, [xDX + VMMR0JMPBUF.pvSavedStack]
 %ifdef RT_STRICT
        cmp     dword [eax], STACK_PADDING
        jne     .bad
 %endif
        lea     eax, [eax + VMM_STACK_SIZE - 32]
        cmp     dword [eax + 1ch], 0deadbeefh   ; Marker 1.
        jne     .bad
 %ifdef RT_STRICT
        cmp     [eax + 18h], edx                ; The saved pJmpBuf pointer.
        jne     .bad
        cmp     dword [eax + 14h], 00c00ffeeh   ; Marker 2.
        jne     .bad
        cmp     dword [eax + 10h], 0f00dbeefh   ; Marker 3.
        jne     .bad
 %endif
%else  ; !VMM_R0_SWITCH_STACK
        cmp     ecx, [xDX + VMMR0JMPBUF.SpCheck]
        jne     .bad
.espCheck_ok:
        mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
        cmp     ecx, VMM_STACK_SIZE
        ja      .bad
        test    ecx, 3
        jnz     .bad
        mov     edi, [xDX + VMMR0JMPBUF.esp]
        sub     edi, [xDX + VMMR0JMPBUF.SpResume]
        cmp     ecx, edi
        jne     .bad
%endif

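        ; Two resume strategies follow: with VMM_R0_SWITCH_STACK the worker
        ; already runs on the dedicated stack buffer, so resuming is a plain
        ; stack pointer switch; otherwise the stack bytes saved by
        ; vmmR0CallRing3LongJmp are copied back over the original location
        ; before execution continues.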
%ifdef VMM_R0_SWITCH_STACK
        ; Switch stack.
        mov     esp, [xDX + VMMR0JMPBUF.SpResume]
%else
        ; Restore the stack.
        mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
        shr     ecx, 2
        mov     esi, [xDX + VMMR0JMPBUF.pvSavedStack]
        mov     edi, [xDX + VMMR0JMPBUF.SpResume]
        mov     esp, edi
        rep movsd
%endif ; !VMM_R0_SWITCH_STACK
        mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 0

        ;
        ; Continue where we left off.
        ;
%ifdef VBOX_STRICT
        pop     eax                         ; magic
        cmp     eax, RESUME_MAGIC
        je      .magic_ok
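        ; Magic mismatch: deliberately write through a bogus pointer (0123h)
        ; so the resulting fault takes us down with edx (pJmpBuf) at hand.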
        mov     ecx, 0123h
        mov     [ecx], edx
.magic_ok:
%endif
        popf
        pop     ebx
        pop     esi
        pop     edi
        pop     ebp
        xor     eax, eax                    ; VINF_SUCCESS
        ret
ENDPROC vmmR0CallRing3SetJmp


;;
; Worker for VMMRZCallRing3.
; This will save the stack and registers.
;
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[ebp+08h]  Pointer to the jump buffer.
; @param    rc       msc:rdx gcc:rsi x86:[ebp+0ch]  The return code.
;
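; A minimal C-level sketch of the intended use (illustrative only; JmpBuf is
; hypothetical and VINF_VMM_CALL_HOST is just an example status code):
;
;   /* Inside the worker started by vmmR0CallRing3SetJmp: */
;   int rc = vmmR0CallRing3LongJmp(&JmpBuf, VINF_VMM_CALL_HOST);
;   /* Returns VERR_VMM_LONG_JMP_ERROR right away if the buffer isn't armed;
;      otherwise it only "returns" (with VINF_SUCCESS) once the EMT re-enters
;      vmmR0CallRing3SetJmp and the resume path restores the saved stack. */
;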
BEGINPROC vmmR0CallRing3LongJmp
        ;
        ; Save the registers on the stack.
        ;
        push    ebp
        mov     ebp, esp
        push    edi
        push    esi
        push    ebx
        pushf
%ifdef VBOX_STRICT
        push    RESUME_MAGIC
%endif

        ;
        ; Load parameters.
        ;
        mov     edx, [ebp + 08h]            ; pJmpBuf
        mov     eax, [ebp + 0ch]            ; rc

        ;
        ; Is the jump buffer armed?
        ;
        cmp     dword [xDX + VMMR0JMPBUF.eip], byte 0
        je      .nok

        ;
        ; Sanity checks.
        ;
        mov     edi, [xDX + VMMR0JMPBUF.pvSavedStack]
        test    edi, edi                    ; Darwin may set this to 0.
        jz      .nok
        mov     [xDX + VMMR0JMPBUF.SpResume], esp
%ifndef VMM_R0_SWITCH_STACK
        mov     esi, esp
        mov     ecx, [xDX + VMMR0JMPBUF.esp]
        sub     ecx, esi
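        ; ecx = bytes between the current esp and the esp recorded at setjmp
        ; time, i.e. the amount of live stack that needs to be saved.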

        ; two sanity checks on the size.
        cmp     ecx, VMM_STACK_SIZE         ; check max size.
        jnbe    .nok

        ;
        ; Copy the stack.
        ;
        test    ecx, 3                      ; check alignment
        jnz     .nok
        mov     [xDX + VMMR0JMPBUF.cbSavedStack], ecx
        shr     ecx, 2
        rep movsd
%endif ; !VMM_R0_SWITCH_STACK

        ; Save a PC here to assist unwinding.
.unwind_point:
        mov     dword [xDX + VMMR0JMPBUF.SavedEipForUnwind], .unwind_point
        mov     ecx, [xDX + VMMR0JMPBUF.ebp]
        lea     ecx, [ecx + 4]
        mov     [xDX + VMMR0JMPBUF.UnwindRetPcLocation], ecx
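        ; Together with UnwindRetPcValue (filled in just before the jump
        ; below), these fields let a debugger reconstruct the ring-0 call
        ; chain while the thread is off executing the ring-3 request.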

        ; Save ESP & EBP to enable stack dumps
        mov     ecx, ebp
        mov     [xDX + VMMR0JMPBUF.SavedEbp], ecx
        sub     ecx, 4
        mov     [xDX + VMMR0JMPBUF.SavedEsp], ecx

        ; store the last pieces of info.
        mov     ecx, [xDX + VMMR0JMPBUF.esp]
        mov     [xDX + VMMR0JMPBUF.SpCheck], ecx
        mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 1

        ;
        ; Do the long jump.
        ;
        mov     ebx, [xDX + VMMR0JMPBUF.ebx]
        mov     esi, [xDX + VMMR0JMPBUF.esi]
        mov     edi, [xDX + VMMR0JMPBUF.edi]
        mov     ebp, [xDX + VMMR0JMPBUF.ebp]
        mov     ecx, [xDX + VMMR0JMPBUF.eip]
        mov     [xDX + VMMR0JMPBUF.UnwindRetPcValue], ecx
        mov     esp, [xDX + VMMR0JMPBUF.esp]
        push    dword [xDX + VMMR0JMPBUF.eflags]
        popf
        jmp     ecx

        ;
        ; Failure
        ;
.nok:
%ifdef VBOX_STRICT
        pop     eax                         ; magic
        cmp     eax, RESUME_MAGIC
        je      .magic_ok
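        ; Magic mismatch: same deliberate crash-on-corruption as in the
        ; setjmp resume path above.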
        mov     ecx, 0123h
        mov     [ecx], edx
.magic_ok:
%endif
        popf
        pop     ebx
        pop     esi
        pop     edi
        mov     eax, VERR_VMM_LONG_JMP_ERROR
        leave
        ret
ENDPROC vmmR0CallRing3LongJmp


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
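; Note: this leans on the cdecl stack layout.  Pushing a single NULL in front
; of the caller's frame makes RTLogLogger see pLogger = NULL (the default
; instance), while the caller's return address lands in RTLogLogger's second
; parameter (pvCallerRet, which it ignores); pszFormat and the variadic
; arguments then line up unchanged.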
EXPORTEDNAME vmmR0LoggerWrapper
        push    0                           ; assumes we're the wrapper for a default instance.
        call    NAME(RTLogLogger)
        add     esp, byte 4
        ret
ENDPROC vmmR0LoggerWrapper
