VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0A.asm@7507

Last change on this file since 7507 was 7507, checked in by vboxsync, 17 years ago

Restore saved registers after returning from a call in vmmR0CallHostSetJmp. Otherwise, after a resume, registers that you're not supposed to trash contain the values from the previous run (esi contains the IRP pointer, for instance).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 11.7 KB
; $Id: VMMR0A.asm 7507 2008-03-20 14:16:51Z vboxsync $
;; @file
; VMM - R0 assembly routines.
;

;
; Copyright (C) 2006-2007 innotek GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "iprt/err.mac"


%ifdef RT_ARCH_X86 ; The other architecture(s) use(s) C99 variadic macros.
extern IMPNAME(RTLogLogger)
%endif


BEGINCODE


;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMR0CallHost if we're
; in the middle of a ring-3 call. Another difference is the function pointer and
; argument. This has to do with resuming code and the stack frame of the caller.
;
; @returns  VINF_SUCCESS on success or whatever is passed to vmmR0CallHostLongJmp.
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[esp+4]  Our jmp_buf.
; @param    pfn      msc:rdx gcc:rsi x86:[esp+8]  The function to be called when not resuming.
; @param    pvUser   msc:r8  gcc:rdx x86:[esp+c]  The argument of that function.
;
BEGINPROC vmmR0CallHostSetJmp
GLOBALNAME vmmR0CallHostSetJmpEx
%ifdef RT_ARCH_X86
    ;
    ; Save the registers.
    ;
    mov     edx, [esp + 4h]                     ; pJmpBuf
    mov     [edx + VMMR0JMPBUF.ebx], ebx
    mov     [edx + VMMR0JMPBUF.esi], esi
    mov     [edx + VMMR0JMPBUF.edi], edi
    mov     [edx + VMMR0JMPBUF.ebp], ebp
    mov     eax, [esp]
    mov     [edx + VMMR0JMPBUF.eip], eax
    lea     ecx, [esp + 4]                      ; (used in resume)
    mov     [edx + VMMR0JMPBUF.esp], ecx
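    ; Note: the saved esp is the caller's stack pointer as it will be once our
    ;       return address has been popped; vmmR0CallHostLongJmp later saves the
    ;       stack from its current esp up to exactly this point.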

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [edx + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

    mov     ecx, [esp + 0ch]                    ; pvUser
    mov     eax, [esp + 08h]                    ; pfn
    sub     esp, 12                             ; align the stack on a 16-byte boundary.
    mov     [esp], ecx
    call    eax
    add     esp, 12
    mov     edx, [esp + 4h]                     ; pJmpBuf

    ; Restore the registers that we're not allowed to modify, otherwise a
    ; resume might restore the wrong values (from the previous run).
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     ebp, [edx + VMMR0JMPBUF.ebp]

    and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
    ret

    ;
    ; Resume the VMMR0CallHost call.
    ;
.resume:
    ; Sanity checks.
    cmp     ecx, [edx + VMMR0JMPBUF.SpCheck]
    je      .espCheck_ok
.bad:
    and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     eax, VERR_INTERNAL_ERROR            ; todo: better return code!
    ret

.espCheck_ok:
    mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
    cmp     ecx, 8192
    ja      .bad
    test    ecx, 3
    jnz     .bad
    mov     edi, [edx + VMMR0JMPBUF.esp]
    sub     edi, [edx + VMMR0JMPBUF.SpResume]
    cmp     ecx, edi
    jne     .bad
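    ; All checks passed: the saved stack image is no larger than 8192 bytes,
    ; dword aligned, and exactly fills the gap between SpResume and the setjmp esp.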

    ;
    ; Restore the stack.
    ;
    mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 0
    mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 2
    mov     esi, [edx + VMMR0JMPBUF.pvSavedStack]
    mov     edi, [edx + VMMR0JMPBUF.SpResume]
    mov     esp, edi
    rep movsd

    ;
    ; Continue where we left off.
    ;
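    ; The pops below mirror the pushes vmmR0CallHostLongJmp made (ebp, edi, esi,
    ; ebx, eflags) right before it saved the stack, so execution resumes in its
    ; caller as if vmmR0CallHostLongJmp had returned VINF_SUCCESS.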
    popf
    pop     ebx
    pop     esi
    pop     edi
    pop     ebp
    xor     eax, eax                            ; VINF_SUCCESS
    ret
%endif ; RT_ARCH_X86

%ifdef RT_ARCH_AMD64
    ;
    ; Save the registers.
    ;
    push    rbp
    mov     rbp, rsp
 %ifdef ASM_CALL64_MSC
    sub     rsp, 30h
    mov     r11, rdx                            ; pfn
    mov     rdx, rcx                            ; pJmpBuf
 %else
    sub     rsp, 10h
    mov     r8, rdx                             ; pvUser (save it like MSC)
    mov     r11, rsi                            ; pfn
    mov     rdx, rdi                            ; pJmpBuf
 %endif
    mov     [rdx + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
    mov     [rdx + VMMR0JMPBUF.rsi], rsi
    mov     [rdx + VMMR0JMPBUF.rdi], rdi
 %endif
    mov     r10, [rbp]
    mov     [rdx + VMMR0JMPBUF.rbp], r10
    mov     [rdx + VMMR0JMPBUF.r12], r12
    mov     [rdx + VMMR0JMPBUF.r13], r13
    mov     [rdx + VMMR0JMPBUF.r14], r14
    mov     [rdx + VMMR0JMPBUF.r15], r15
    mov     rax, [rbp + 8]
    mov     [rdx + VMMR0JMPBUF.rip], rax
    lea     r10, [rbp + 10h]                    ; (used in resume)
    mov     [rdx + VMMR0JMPBUF.rsp], r10
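    ; Note: rbp + 10h skips the pushed rbp and our return address, so the saved
    ;       rsp is the caller's stack pointer at the call site; vmmR0CallHostLongJmp
    ;       later saves the stack from its current rsp up to exactly this point.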

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [rdx + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

    mov     [rbp - 8], rdx                      ; Save it and fix stack alignment (16).
 %ifdef ASM_CALL64_MSC
    mov     rcx, r8                             ; pvUser -> arg0
 %else
    mov     rdi, r8                             ; pvUser -> arg0
 %endif
    call    r11
    mov     rdx, [rbp - 8]                      ; pJmpBuf

    ; Restore the registers that we're not allowed to modify, otherwise a
    ; resume might restore the wrong values (from the previous run).
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]

    and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    leave
    ret

    ;
    ; Resume the VMMR0CallHost call.
    ;
.resume:
    ; Sanity checks.
    cmp     r10, [rdx + VMMR0JMPBUF.SpCheck]
    je      .rspCheck_ok
.bad:
    and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]
    mov     eax, VERR_INTERNAL_ERROR            ; todo: better return code!
    leave
    ret

.rspCheck_ok:
    mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
    cmp     rcx, 8192
    ja      .bad
    test    rcx, 3
    jnz     .bad
    mov     rdi, [rdx + VMMR0JMPBUF.rsp]
    sub     rdi, [rdx + VMMR0JMPBUF.SpResume]
    cmp     rcx, rdi
    jne     .bad
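    ; All checks passed: the saved stack image is no larger than 8192 bytes,
    ; dword aligned, and exactly fills the gap between SpResume and the setjmp rsp.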

    ;
    ; Restore the stack.
    ;
    mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 0
    mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 3
    mov     rsi, [rdx + VMMR0JMPBUF.pvSavedStack]
    mov     rdi, [rdx + VMMR0JMPBUF.SpResume]
    mov     rsp, rdi
    rep movsq

    ;
    ; Continue where we left off.
    ;
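    ; The pops below mirror the pushes vmmR0CallHostLongJmp made (rbp, r15-r12,
    ; rdi/rsi for MSC, rbx, rflags) right before it saved the stack, so execution
    ; resumes in its caller as if vmmR0CallHostLongJmp had returned VINF_SUCCESS.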
    popf
    pop     rbx
 %ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
 %endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    pop     rbp
    xor     eax, eax                            ; VINF_SUCCESS
    ret
%endif
ENDPROC vmmR0CallHostSetJmp
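
;
; How the pair of routines is meant to be driven from C, as implied by the doc
; comments above -- an illustrative sketch only; the typedefs and names below
; (other than the two routine names themselves) are simplified, hypothetical
; stand-ins rather than the real declarations from VMMInternal.h:
;
;   typedef struct VMMR0JMPBUF VMMR0JMPBUF;         /* simplified stand-in */
;   typedef int (*PFNVMMR0WORKER)(void *pvUser);    /* hypothetical */
;   int vmmR0CallHostSetJmp(VMMR0JMPBUF *pJmpBuf, PFNVMMR0WORKER pfn, void *pvUser);
;   int vmmR0CallHostLongJmp(VMMR0JMPBUF *pJmpBuf, int rc);
;
;   /* First entry: fInRing3Call is clear, so pfn(pvUser) runs and its status is
;    * returned.  If pfn needs ring-3, vmmR0CallHostLongJmp(pJmpBuf, rc) saves the
;    * stack and makes this call return rc instead.  When ring-0 is re-entered
;    * while fInRing3Call is still set, the same call restores the saved stack and
;    * resumes where the long jump was taken. */
;   int rc = vmmR0CallHostSetJmp(&g_JmpBuf, vmmR0WorkerFunction, pvUser);
;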


;;
; Worker for VMMR0CallHost.
; This will save the stack and registers.
;
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[ebp+8]  Pointer to the jump buffer.
; @param    rc       msc:rdx gcc:rsi x86:[ebp+c]  The return code.
;
BEGINPROC vmmR0CallHostLongJmp
%ifdef RT_ARCH_X86
    ;
    ; Save the registers on the stack.
    ;
    push    ebp
    mov     ebp, esp
    push    edi
    push    esi
    push    ebx
    pushf

    ;
    ; Load parameters.
    ;
    mov     edx, [ebp + 08h]                    ; pJmpBuf
    mov     eax, [ebp + 0ch]                    ; rc

    ;
    ; Is the jump buffer armed?
    ;
    cmp     dword [edx + VMMR0JMPBUF.eip], byte 0
    je      .nok

    ;
    ; Save the stack.
    ;
    mov     edi, [edx + VMMR0JMPBUF.pvSavedStack]
    mov     [edx + VMMR0JMPBUF.SpResume], esp
    mov     esi, esp
    mov     ecx, [edx + VMMR0JMPBUF.esp]
    sub     ecx, esi
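    ; ecx = number of bytes between the current esp and the esp recorded by
    ;       vmmR0CallHostSetJmp, i.e. the stack area that has to be saved.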

    ; two sanity checks on the size.
    cmp     ecx, 8192                           ; check max size.
    jbe     .ok
.nok:
    mov     eax, VERR_INTERNAL_ERROR
    popf
    pop     ebx
    pop     esi
    pop     edi
    leave
    ret
.ok:
    test    ecx, 3                              ; check alignment
    jnz     .nok
    mov     [edx + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 2
    rep movsd

    ; store the last pieces of info.
    mov     ecx, [edx + VMMR0JMPBUF.esp]
    mov     [edx + VMMR0JMPBUF.SpCheck], ecx
    mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 1

    ;
    ; Do the long jump.
    ;
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     ebp, [edx + VMMR0JMPBUF.ebp]
    mov     ecx, [edx + VMMR0JMPBUF.eip]
    mov     esp, [edx + VMMR0JMPBUF.esp]
    jmp     ecx
%endif ; RT_ARCH_X86

%ifdef RT_ARCH_AMD64
    ;
    ; Save the registers on the stack.
    ;
    push    rbp
    mov     rbp, rsp
    push    r15
    push    r14
    push    r13
    push    r12
 %ifdef ASM_CALL64_MSC
    push    rdi
    push    rsi
 %endif
    push    rbx
    pushf

    ;
    ; Normalize the parameters.
    ;
 %ifdef ASM_CALL64_MSC
    mov     eax, edx                            ; rc
    mov     rdx, rcx                            ; pJmpBuf
 %else
    mov     rdx, rdi                            ; pJmpBuf
    mov     eax, esi                            ; rc
 %endif

    ;
    ; Is the jump buffer armed?
    ;
    cmp     qword [rdx + VMMR0JMPBUF.rip], byte 0
    je      .nok

    ;
    ; Save the stack.
    ;
    mov     rdi, [rdx + VMMR0JMPBUF.pvSavedStack]
    mov     [rdx + VMMR0JMPBUF.SpResume], rsp
    mov     rsi, rsp
    mov     rcx, [rdx + VMMR0JMPBUF.rsp]
    sub     rcx, rsi
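    ; rcx = number of bytes between the current rsp and the rsp recorded by
    ;       vmmR0CallHostSetJmp, i.e. the stack area that has to be saved.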

    ; two sanity checks on the size.
    cmp     rcx, 8192                           ; check max size.
    jbe     .ok
.nok:
    mov     eax, VERR_INTERNAL_ERROR
    popf
    pop     rbx
 %ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
 %endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    leave
    ret

.ok:
    test    ecx, 7                              ; check alignment
    jnz     .nok
    mov     [rdx + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 3
    rep movsq

    ; store the last pieces of info.
    mov     rcx, [rdx + VMMR0JMPBUF.rsp]
    mov     [rdx + VMMR0JMPBUF.SpCheck], rcx
    mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 1

    ;
    ; Do the long jump.
    ;
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]
    mov     rbp, [rdx + VMMR0JMPBUF.rbp]
    mov     rcx, [rdx + VMMR0JMPBUF.rip]
    mov     rsp, [rdx + VMMR0JMPBUF.rsp]
    jmp     rcx
%endif
ENDPROC vmmR0CallHostLongJmp
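
;
; Seen from C, a successful long jump makes the armed vmmR0CallHostSetJmp call
; return the rc passed in here, and the next setjmp on the same buffer resumes
; execution right after this call with VINF_SUCCESS.  A rough, purely
; illustrative sketch (pJmpBuf and rcRequest are hypothetical placeholders):
;
;   int rc = vmmR0CallHostLongJmp(pJmpBuf, rcRequest);
;   /* rc == VINF_SUCCESS once ring-0 resumes the saved stack; an error status
;    * (VERR_INTERNAL_ERROR) means the buffer was not armed or a check failed. */
;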


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
EXPORTEDNAME vmmR0LoggerWrapper
%ifdef RT_ARCH_X86 ; The other architecture(s) use(s) C99 variadic macros.
    push    0                                   ; assumes we're the wrapper for a default instance.
    call    IMP(RTLogLogger)
    add     esp, byte 4
    ret
%else
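    ; Should never be reached: non-x86 builds route logging through the C99
    ; variadic macros mentioned above, so trap (int3) if this wrapper is hit.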
    int3
    int3
    int3
    ret
%endif
ENDPROC vmmR0LoggerWrapper
