VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-x86.asm@ 39402

Last change on this file since 39402 was 39402, checked in by vboxsync, 13 years ago

VMM: don't use generic IPE status codes, use specific ones. Part 1.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 11.6 KB
Line 
1; $Id: VMMR0JmpA-x86.asm 39402 2011-11-23 16:25:04Z vboxsync $
2;; @file
3; VMM - R0 SetJmp / LongJmp routines for X86.
4;
5
6;
7; Copyright (C) 2006-2009 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VMMInternal.mac"
23%include "iprt/err.mac"
24%include "VBox/param.mac"
25
26
27;*******************************************************************************
28;* Defined Constants And Macros *
29;*******************************************************************************
30%define RESUME_MAGIC 07eadf00dh
31%define STACK_PADDING 0eeeeeeeeh
32
33
34; For vmmR0LoggerWrapper. (The other architecture(s) use(s) C99 variadic macros.)
35extern NAME(RTLogLogger)
36
37
38BEGINCODE
39
40
;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
; in the middle of a ring-3 call.  Another difference is the function pointer and
; argument.  This has to do with resuming code and the stack frame of the caller.
;
; @returns  VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[esp+0x04]  Our jmp_buf.
; @param    pfn      msc:rdx gcc:rsi x86:[esp+0x08]  The function to be called when not resuming.
; @param    pvUser1  msc:r8  gcc:rdx x86:[esp+0x0c]  The argument of that function.
; @param    pvUser2  msc:r9  gcc:rcx x86:[esp+0x10]  The argument of that function.
;
BEGINPROC vmmR0CallRing3SetJmp
GLOBALNAME vmmR0CallRing3SetJmpEx
    ;
    ; Save the registers.
    ;
    mov     edx, [esp + 4h]                 ; pJmpBuf
    mov     [xDX + VMMR0JMPBUF.ebx], ebx
    mov     [xDX + VMMR0JMPBUF.esi], esi
    mov     [xDX + VMMR0JMPBUF.edi], edi
    mov     [xDX + VMMR0JMPBUF.ebp], ebp
    mov     xAX, [esp]                      ; return address
    mov     [xDX + VMMR0JMPBUF.eip], xAX
    lea     ecx, [esp + 4]                  ; caller's esp after our ret (used in resume)
    mov     [xDX + VMMR0JMPBUF.esp], ecx
    pushf
    pop     xAX
    mov     [xDX + VMMR0JMPBUF.eflags], xAX

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [xDX + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

    mov     ebx, edx                        ; pJmpBuf -> ebx (persistent reg)
%ifdef VMM_R0_SWITCH_STACK
    mov     esi, [ebx + VMMR0JMPBUF.pvSavedStack]
    test    esi, esi
    jz      .entry_error
 %ifdef VBOX_STRICT
    cmp     dword [esi], 0h                 ; bottom dword must be clear before arming
    jne     .entry_error
    mov     edx, esi
    mov     edi, esi
    mov     ecx, VMM_STACK_SIZE / 4
    mov     eax, STACK_PADDING
    rep stosd                               ; fill the whole saved stack with the padding
                                            ; pattern (fix: was 'repne stosd'; REPNE is
                                            ; meaningless with STOS, REP is the correct prefix)
 %endif
    lea     esi, [esi + VMM_STACK_SIZE - 32]
    mov     [esi + 1ch], dword 0deadbeefh   ; Marker 1.
    mov     [esi + 18h], ebx                ; Save pJmpBuf pointer.
    mov     [esi + 14h], dword 00c00ffeeh   ; Marker 2.
    mov     [esi + 10h], dword 0f00dbeefh   ; Marker 3.
    mov     edx, [esp + 10h]                ; pvArg2
    mov     ecx, [esp + 0ch]                ; pvArg1
    mov     eax, [esp + 08h]                ; pfn
 %if 1 ; Use this to eat some extra stack - handy for finding paths using lots of stack.
  %define FRAME_OFFSET 0
 %else
  %define FRAME_OFFSET 1024
 %endif
    mov     [esi - FRAME_OFFSET + 04h], edx ; 2nd argument for pfn
    mov     [esi - FRAME_OFFSET      ], ecx ; 1st argument for pfn
    lea     esp, [esi - FRAME_OFFSET]       ; Switch stack!
    call    eax
    and     dword [esi + 1ch], byte 0       ; reset marker.

 %ifdef VBOX_STRICT
    ; Calc stack usage and check for overflows.
    mov     edi, [ebx + VMMR0JMPBUF.pvSavedStack]
    cmp     dword [edi], STACK_PADDING      ; Check for obvious stack overflow.
    jne     .stack_overflow
    mov     esi, eax                        ; save eax (pfn's return code)
    mov     eax, STACK_PADDING
    mov     ecx, VMM_STACK_SIZE / 4
    cld
    repe scasd                              ; scan upward for first non-padding dword
    shl     ecx, 2                          ; *4 -> ecx = bytes used (approx.)
    cmp     ecx, VMM_STACK_SIZE - 64        ; Less than 64 bytes left -> overflow as well.
    mov     eax, esi                        ; restore eax in case of overflow (esi remains used)
    jae     .stack_overflow_almost

    ; Update stack usage statistics.
    cmp     ecx, [ebx + VMMR0JMPBUF.cbUsedMax] ; New max usage?
    jle     .no_used_max
    mov     [ebx + VMMR0JMPBUF.cbUsedMax], ecx
.no_used_max:
    ; To simplify the average stuff, just historize before we hit div errors.
    inc     dword [ebx + VMMR0JMPBUF.cUsedTotal]
    test    [ebx + VMMR0JMPBUF.cUsedTotal], dword 0c0000000h
    jz      .no_historize
    mov     dword [ebx + VMMR0JMPBUF.cUsedTotal], 2
    mov     edi, [ebx + VMMR0JMPBUF.cbUsedAvg]
    mov     [ebx + VMMR0JMPBUF.cbUsedTotal], edi
    mov     dword [ebx + VMMR0JMPBUF.cbUsedTotal + 4], 0
.no_historize:
    add     [ebx + VMMR0JMPBUF.cbUsedTotal], ecx
    adc     dword [ebx + VMMR0JMPBUF.cbUsedTotal + 4], 0
    mov     eax, [ebx + VMMR0JMPBUF.cbUsedTotal]
    mov     edx, [ebx + VMMR0JMPBUF.cbUsedTotal + 4]
    mov     edi, [ebx + VMMR0JMPBUF.cUsedTotal]
    div     edi                             ; edx:eax / cUsedTotal -> new average in eax
    mov     [ebx + VMMR0JMPBUF.cbUsedAvg], eax

    mov     eax, esi                        ; restore eax (final, esi released)

    mov     edi, [ebx + VMMR0JMPBUF.pvSavedStack]
    mov     dword [edi], 0h                 ; Reset the overflow marker.
 %endif ; VBOX_STRICT

%else  ; !VMM_R0_SWITCH_STACK
    mov     ecx, [esp + 0ch]                ; pvArg1
    mov     edx, [esp + 10h]                ; pvArg2
    mov     eax, [esp + 08h]                ; pfn
    sub     esp, 12                         ; align the stack on a 16-byte boundary.
    mov     [esp      ], ecx
    mov     [esp + 04h], edx
    call    eax
%endif ; !VMM_R0_SWITCH_STACK
    mov     edx, ebx                        ; pJmpBuf -> edx (volatile reg)

    ;
    ; Return like in the long jump but clear eip, no short cuts here.
    ;
.proper_return:
    mov     ebx, [xDX + VMMR0JMPBUF.ebx]
    mov     esi, [xDX + VMMR0JMPBUF.esi]
    mov     edi, [xDX + VMMR0JMPBUF.edi]
    mov     ebp, [xDX + VMMR0JMPBUF.ebp]
    mov     xCX, [xDX + VMMR0JMPBUF.eip]
    and     dword [xDX + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
    mov     esp, [xDX + VMMR0JMPBUF.esp]
    push    dword [xDX + VMMR0JMPBUF.eflags]
    popf
    jmp     xCX                             ; return to the saved eip

.entry_error:
    mov     eax, VERR_VMM_SET_JMP_ERROR
    jmp     .proper_return

.stack_overflow:
    mov     eax, VERR_VMM_SET_JMP_STACK_OVERFLOW
    mov     edx, ebx
    jmp     .proper_return

.stack_overflow_almost:
    mov     eax, VERR_VMM_SET_JMP_STACK_OVERFLOW
    mov     edx, ebx
    jmp     .proper_return

    ;
    ; Aborting resume.
    ;
.bad:
    and     dword [xDX + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
    mov     edi, [xDX + VMMR0JMPBUF.edi]
    mov     esi, [xDX + VMMR0JMPBUF.esi]
    mov     ebx, [xDX + VMMR0JMPBUF.ebx]
    mov     eax, VERR_VMM_SET_JMP_ABORTED_RESUME
    ret

    ;
    ; Resume VMMRZCallRing3 the call.
    ;
.resume:
    ; Sanity checks.
%ifdef VMM_R0_SWITCH_STACK
    mov     eax, [xDX + VMMR0JMPBUF.pvSavedStack]
 %ifdef RT_STRICT
    cmp     dword [eax], STACK_PADDING      ; bottom of the saved stack must still be padding
    jne     .bad                            ; (fix: the compare result was previously ignored -
                                            ;  there was no conditional jump after the cmp)
 %endif
    lea     eax, [eax + VMM_STACK_SIZE - 32]
    cmp     dword [eax + 1ch], 0deadbeefh   ; Marker 1.
    jne     .bad
 %ifdef RT_STRICT
    ; The markers were written relative to pvSavedStack + VMM_STACK_SIZE - 32, which is
    ; now in eax.  (fix: these checks used esi, which holds the caller's arbitrary saved
    ; value at this point, so they compared garbage.)
    cmp     [eax + 18h], edx                ; The saved pJmpBuf pointer.
    jne     .bad
    cmp     dword [eax + 14h], 00c00ffeeh   ; Marker 2.
    jne     .bad
    cmp     dword [eax + 10h], 0f00dbeefh   ; Marker 3.
    jne     .bad
 %endif
%else  ; !VMM_R0_SWITCH_STACK
    cmp     ecx, [xDX + VMMR0JMPBUF.SpCheck] ; ecx = esp+4 from the prologue above
    jne     .bad
.espCheck_ok:
    mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
    cmp     ecx, VMM_STACK_SIZE
    ja      .bad
    test    ecx, 3                          ; saved size must be dword aligned
    jnz     .bad
    mov     edi, [xDX + VMMR0JMPBUF.esp]
    sub     edi, [xDX + VMMR0JMPBUF.SpResume]
    cmp     ecx, edi                        ; saved size must match esp - SpResume
    jne     .bad
%endif

%ifdef VMM_R0_SWITCH_STACK
    ; Switch stack.
    mov     esp, [xDX + VMMR0JMPBUF.SpResume]
%else
    ; Restore the stack.
    mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 2
    mov     esi, [xDX + VMMR0JMPBUF.pvSavedStack]
    mov     edi, [xDX + VMMR0JMPBUF.SpResume]
    mov     esp, edi
    rep movsd                               ; copy the saved stack back in place
%endif ; !VMM_R0_SWITCH_STACK
    mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 0

    ;
    ; Continue where we left off.
    ;
%ifdef VBOX_STRICT
    pop     eax                             ; magic (pushed by vmmR0CallRing3LongJmp)
    cmp     eax, RESUME_MAGIC
    je      .magic_ok
    mov     ecx, 0123h                      ; crash loudly in strict builds: write to a
    mov     [ecx], edx                      ; near-null address.
.magic_ok:
%endif
    popf
    pop     ebx
    pop     esi
    pop     edi
    pop     ebp
    xor     eax, eax                        ; VINF_SUCCESS
    ret
ENDPROC vmmR0CallRing3SetJmp
274
275
;;
; Worker for VMMRZCallRing3.
; This will save the stack and registers.
;
; The jump lands on the return path inside vmmR0CallRing3SetJmp, which then returns
; rc (in eax) to the original setjmp caller.  The current stack is either saved into
; the jump buffer (default) or simply left in place (VMM_R0_SWITCH_STACK) so the
; ring-3 call can be resumed later.
;
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[ebp+8]   Pointer to the jump buffer.
; @param    rc       msc:rdx gcc:rsi x86:[ebp+c]   The return code.
;
BEGINPROC vmmR0CallRing3LongJmp
    ;
    ; Save the registers on the stack.
    ;
    push    ebp
    mov     ebp, esp
    push    edi
    push    esi
    push    ebx
    pushf
%ifdef VBOX_STRICT
    push    RESUME_MAGIC                ; consumed and verified by the resume path in
                                        ; vmmR0CallRing3SetJmp (and by .nok below).
%endif

    ;
    ; Load parameters.
    ;
    mov     edx, [ebp + 08h]            ; pJmpBuf
    mov     eax, [ebp + 0ch]            ; rc

    ;
    ; Is the jump buffer armed?
    ;
    cmp     dword [xDX + VMMR0JMPBUF.eip], byte 0 ; eip is zeroed on return/abort, so
    je      .nok                                  ; non-zero means the buffer is armed.

    ;
    ; Sanity checks.
    ;
    mov     edi, [xDX + VMMR0JMPBUF.pvSavedStack]
    test    edi, edi                    ; darwin may set this to 0.
    jz      .nok
    mov     [xDX + VMMR0JMPBUF.SpResume], esp
%ifndef VMM_R0_SWITCH_STACK
    mov     esi, esp                    ; copy source: current stack top
    mov     ecx, [xDX + VMMR0JMPBUF.esp]
    sub     ecx, esi                    ; ecx = bytes between here and the setjmp frame

    ; two sanity checks on the size.
    cmp     ecx, VMM_STACK_SIZE         ; check max size.
    jnbe    .nok

    ;
    ; Copy the stack.
    ;
    test    ecx, 3                      ; check alignment
    jnz     .nok
    mov     [xDX + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 2
    rep movsd                           ; save [esp .. jmpbuf.esp) into pvSavedStack
%endif ; !VMM_R0_SWITCH_STACK

    ; Save ESP & EBP to enable stack dumps
    mov     ecx, ebp
    mov     [xDX + VMMR0JMPBUF.SavedEbp], ecx
    sub     ecx, 4                      ; NOTE(review): SavedEsp = ebp - 4, i.e. just below
                                        ; the saved ebp slot, not the current esp - presumably
                                        ; what the stack-dump code expects; confirm.
    mov     [xDX + VMMR0JMPBUF.SavedEsp], ecx

    ; store the last pieces of info.
    mov     ecx, [xDX + VMMR0JMPBUF.esp]
    mov     [xDX + VMMR0JMPBUF.SpCheck], ecx ; verified against esp+4 on resume
    mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 1

    ;
    ; Do the long jump.
    ;
    mov     ebx, [xDX + VMMR0JMPBUF.ebx]
    mov     esi, [xDX + VMMR0JMPBUF.esi]
    mov     edi, [xDX + VMMR0JMPBUF.edi]
    mov     ebp, [xDX + VMMR0JMPBUF.ebp]
    mov     ecx, [xDX + VMMR0JMPBUF.eip]
    mov     esp, [xDX + VMMR0JMPBUF.esp]
    push    dword [xDX + VMMR0JMPBUF.eflags]
    popf
    jmp     ecx                         ; land in vmmR0CallRing3SetJmp; rc is in eax.

    ;
    ; Failure
    ;
.nok:
%ifdef VBOX_STRICT
    pop     eax                         ; magic
    cmp     eax, RESUME_MAGIC
    je      .magic_ok
    mov     ecx, 0123h                  ; crash loudly in strict builds: write to a
    mov     [ecx], edx                  ; near-null address.
.magic_ok:
%endif
    popf
    pop     ebx
    pop     esi
    pop     edi
    mov     eax, VERR_VMM_LONG_JMP_ERROR
    leave
    ret
ENDPROC vmmR0CallRing3LongJmp
379
380
;;
; Internal R0 logger worker: Logger wrapper.
;
; Prepends a NULL logger-instance argument and forwards to RTLogLogger; the caller's
; variadic arguments remain on the stack above our return address.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
EXPORTEDNAME vmmR0LoggerWrapper
    push    0                           ; assumes we're the wrapper for a default instance.
    call    NAME(RTLogLogger)
    add     esp, byte 4                 ; drop only the NULL we pushed (cdecl: the caller
                                        ; cleans up its own arguments).
    ret
ENDPROC vmmR0LoggerWrapper
392
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette