VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0A.asm@18768

Last change on this file since 18768 was 14721, checked in by vboxsync, 16 years ago

VMMR0A.asm: win.x86 linker warning.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 14.2 KB
; $Id: VMMR0A.asm 14721 2008-11-27 16:34:10Z vboxsync $
;; @file
; VMM - R0 assembly routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "iprt/err.mac"


%ifdef RT_ARCH_X86 ; The other architecture(s) use(s) C99 variadic macros.
extern NAME(RTLogLogger)
%endif

%ifdef RT_OS_DARWIN
 %define VMM_R0_SWITCH_STACK
%endif
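
;
; When VMM_R0_SWITCH_STACK is defined, the 32-bit setjmp code below runs pfn on
; the pre-allocated stack pointed to by VMMR0JMPBUF.pvSavedStack (8 KB, see the
; 8192 constants) instead of on the caller's stack; without it, the long jump
; code saves a copy of the used part of the kernel stack into that buffer so
; the resume path can restore it.
;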


BEGINCODE


;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMR0CallHost if we're
; in the middle of a ring-3 call. Another difference is the function pointer and
; arguments. This has to do with resuming code and the stack frame of the caller.
;
; @returns  VINF_SUCCESS on success or whatever is passed to vmmR0CallHostLongJmp.
; @param    pJmpBuf  msc:rcx  gcc:rdi  x86:[esp+0x04]  Our jmp_buf.
; @param    pfn      msc:rdx  gcc:rsi  x86:[esp+0x08]  The function to be called when not resuming.
; @param    pvUser1  msc:r8   gcc:rdx  x86:[esp+0x0c]  The argument of that function.
; @param    pvUser2  msc:r9   gcc:rcx  x86:[esp+0x10]  The argument of that function.
;
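; Rough usage sketch (illustrative only; the actual caller lives in the C code
; of the VMM, and the names below, other than the documented parameters, are
; placeholders):
;
;     VMMR0JMPBUF JmpBuf;   /* in practice a long-lived, pre-initialized buffer */
;     int rc = vmmR0CallHostSetJmp(&JmpBuf, pfnWorker, pvUser1, pvUser2);
;     /* rc is pfnWorker's status on a normal run, or the status passed to
;        vmmR0CallHostLongJmp if the worker requested a ring-3 call; a later
;        call with fInRing3Call set resumes the worker instead of starting
;        a new one. */
;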
BEGINPROC vmmR0CallHostSetJmp
GLOBALNAME vmmR0CallHostSetJmpEx
%ifdef RT_ARCH_X86
        ;
        ; Save the registers.
        ;
        mov     edx, [esp + 4h]                 ; pJmpBuf
        mov     [edx + VMMR0JMPBUF.ebx], ebx
        mov     [edx + VMMR0JMPBUF.esi], esi
        mov     [edx + VMMR0JMPBUF.edi], edi
        mov     [edx + VMMR0JMPBUF.ebp], ebp
        mov     eax, [esp]
        mov     [edx + VMMR0JMPBUF.eip], eax
        lea     ecx, [esp + 4]                  ; (used in resume)
        mov     [edx + VMMR0JMPBUF.esp], ecx

        ;
        ; If we're not in a ring-3 call, call pfn and return.
        ;
        test    byte [edx + VMMR0JMPBUF.fInRing3Call], 1
        jnz     .resume

        mov     ebx, edx                        ; pJmpBuf -> ebx (persistent reg)
%ifdef VMM_R0_SWITCH_STACK
        mov     esi, [ebx + VMMR0JMPBUF.pvSavedStack]
        test    esi, esi
        jz      .entry_error
 %ifdef VBOX_STRICT
        mov     edx, esi
        mov     edi, esi
        mov     ecx, 2048
        mov     eax, 0eeeeeeeeh
        repne stosd
 %endif
        lea     esi, [esi + 8192 - 32]
        mov     [esi + 1ch], dword 0deadbeefh   ; Marker 1.
        mov     [esi + 18h], ebx                ; Save pJmpBuf pointer.
        mov     [esi + 14h], dword 00c00ffeeh   ; Marker 2.
        mov     [esi + 10h], dword 0f00dbeefh   ; Marker 3.
        mov     edx, [esp + 10h]                ; pvArg2
        mov     [esi + 04h], edx
        mov     ecx, [esp + 0ch]                ; pvArg1
        mov     [esi], ecx
        mov     eax, [esp + 08h]                ; pfn
        mov     esp, esi                        ; Switch stack!
        call    eax
        and     dword [esi + 1ch], byte 0       ; clear marker.

%else ; !VMM_R0_SWITCH_STACK
        mov     ecx, [esp + 0ch]                ; pvArg1
        mov     edx, [esp + 10h]                ; pvArg2
        mov     eax, [esp + 08h]                ; pfn
        sub     esp, 12                         ; align the stack on a 16-byte boundary.
        mov     [esp], ecx
        mov     [esp + 04h], edx
        call    eax
%endif ; !VMM_R0_SWITCH_STACK
        mov     edx, ebx                        ; pJmpBuf -> edx (volatile reg)

        ;
        ; Return like in the long jump.
        ; (It is vital that we restore all registers since they might've changed
        ; by a long jump.)
        ;
.proper_return:
        mov     ebx, [edx + VMMR0JMPBUF.ebx]
        mov     esi, [edx + VMMR0JMPBUF.esi]
        mov     edi, [edx + VMMR0JMPBUF.edi]
        mov     ebp, [edx + VMMR0JMPBUF.ebp]
        mov     ecx, [edx + VMMR0JMPBUF.eip]
        and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
        mov     esp, [edx + VMMR0JMPBUF.esp]
        jmp     ecx

.entry_error:
        mov     eax, VERR_INTERNAL_ERROR
        jmp     .proper_return

        ;
        ; Resume the VMMR0CallHost call.
        ;
.resume:
%ifdef VMM_R0_SWITCH_STACK
        ; Switch stack.
        mov     esp, [edx + VMMR0JMPBUF.SpResume]
%else ; !VMM_R0_SWITCH_STACK
        ; Sanity checks.
        cmp     ecx, [edx + VMMR0JMPBUF.SpCheck]
        je      .espCheck_ok
.bad:
        and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
        mov     edi, [edx + VMMR0JMPBUF.edi]
        mov     esi, [edx + VMMR0JMPBUF.esi]
        mov     ebx, [edx + VMMR0JMPBUF.ebx]
        mov     eax, VERR_INTERNAL_ERROR        ; TODO: better return code!
        ret

.espCheck_ok:
        mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
        cmp     ecx, 8192
        ja      .bad
        test    ecx, 3
        jnz     .bad
        mov     edi, [edx + VMMR0JMPBUF.esp]
        sub     edi, [edx + VMMR0JMPBUF.SpResume]
        cmp     ecx, edi
        jne     .bad

        ;
        ; Restore the stack.
        ;
        mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
        shr     ecx, 2
        mov     esi, [edx + VMMR0JMPBUF.pvSavedStack]
        mov     edi, [edx + VMMR0JMPBUF.SpResume]
        mov     esp, edi
        rep movsd
%endif ; !VMM_R0_SWITCH_STACK
        mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 0

        ;
        ; Continue where we left off.
        ;
%ifdef VBOX_STRICT
        pop     eax                             ; magic
        cmp     eax, 0f00dbed0h
        je      .magic_ok
        mov     ecx, 0123h
        mov     [ecx], edx                      ; write to a bogus address so we fault here on a bad magic (strict builds).
.magic_ok:
%endif
        popf
        pop     ebx
        pop     esi
        pop     edi
        pop     ebp
        xor     eax, eax                        ; VINF_SUCCESS
        ret
%endif ; RT_ARCH_X86

%ifdef RT_ARCH_AMD64
        ;
        ; Save the registers.
        ;
        push    rbp
        mov     rbp, rsp
 %ifdef ASM_CALL64_MSC
        sub     rsp, 30h
        mov     r11, rdx                        ; pfn
        mov     rdx, rcx                        ; pJmpBuf;
 %else
        sub     rsp, 10h
        mov     r8, rdx                         ; pvUser1 (save it like MSC)
        mov     r9, rcx                         ; pvUser2 (save it like MSC)
        mov     r11, rsi                        ; pfn
        mov     rdx, rdi                        ; pJmpBuf
 %endif
        mov     [rdx + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
        mov     [rdx + VMMR0JMPBUF.rsi], rsi
        mov     [rdx + VMMR0JMPBUF.rdi], rdi
 %endif
        mov     r10, [rbp]
        mov     [rdx + VMMR0JMPBUF.rbp], r10
        mov     [rdx + VMMR0JMPBUF.r12], r12
        mov     [rdx + VMMR0JMPBUF.r13], r13
        mov     [rdx + VMMR0JMPBUF.r14], r14
        mov     [rdx + VMMR0JMPBUF.r15], r15
        mov     rax, [rbp + 8]
        mov     [rdx + VMMR0JMPBUF.rip], rax
        lea     r10, [rbp + 10h]                ; (used in resume)
        mov     [rdx + VMMR0JMPBUF.rsp], r10

        ;
        ; If we're not in a ring-3 call, call pfn and return.
        ;
        test    byte [rdx + VMMR0JMPBUF.fInRing3Call], 1
        jnz     .resume

        mov     [rbp - 8], rdx                  ; Save it and fix stack alignment (16).
 %ifdef ASM_CALL64_MSC
        mov     rcx, r8                         ; pvUser -> arg0
        mov     rdx, r9
 %else
        mov     rdi, r8                         ; pvUser -> arg0
        mov     rsi, r9
 %endif
        call    r11
        mov     rdx, [rbp - 8]                  ; pJmpBuf

        ; restore the registers that we're not allowed to modify
        ; otherwise a resume might restore the wrong values (from the previous run)
        mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rdx + VMMR0JMPBUF.rsi]
        mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
        mov     r12, [rdx + VMMR0JMPBUF.r12]
        mov     r13, [rdx + VMMR0JMPBUF.r13]
        mov     r14, [rdx + VMMR0JMPBUF.r14]
        mov     r15, [rdx + VMMR0JMPBUF.r15]

        and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
        leave
        ret

        ;
        ; Resume the VMMR0CallHost call.
        ;
.resume:
        ; Sanity checks.
        cmp     r10, [rdx + VMMR0JMPBUF.SpCheck]
        je      .rspCheck_ok
.bad:
        and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
        mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rdx + VMMR0JMPBUF.rsi]
        mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
        mov     r12, [rdx + VMMR0JMPBUF.r12]
        mov     r13, [rdx + VMMR0JMPBUF.r13]
        mov     r14, [rdx + VMMR0JMPBUF.r14]
        mov     r15, [rdx + VMMR0JMPBUF.r15]
        mov     eax, VERR_INTERNAL_ERROR        ; TODO: better return code!
        leave
        ret

.rspCheck_ok:
        mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
        cmp     rcx, 8192
        ja      .bad
        test    rcx, 3
        jnz     .bad
        mov     rdi, [rdx + VMMR0JMPBUF.rsp]
        sub     rdi, [rdx + VMMR0JMPBUF.SpResume]
        cmp     rcx, rdi
        jne     .bad

        ;
        ; Restore the stack.
        ;
        mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 0
        mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
        shr     ecx, 3
        mov     rsi, [rdx + VMMR0JMPBUF.pvSavedStack]
        mov     rdi, [rdx + VMMR0JMPBUF.SpResume]
        mov     rsp, rdi
        rep movsq

        ;
        ; Continue where we left off.
        ;
        popf
        pop     rbx
 %ifdef ASM_CALL64_MSC
        pop     rsi
        pop     rdi
 %endif
        pop     r12
        pop     r13
        pop     r14
        pop     r15
        pop     rbp
        xor     eax, eax                        ; VINF_SUCCESS
        ret
%endif
ENDPROC vmmR0CallHostSetJmp


;;
; Worker for VMMR0CallHost.
; This will save the stack and registers.
;
; @returns  VINF_SUCCESS when the call is resumed, VERR_INTERNAL_ERROR if the
;           jump buffer isn't armed or the stack cannot be saved.
; @param    pJmpBuf  msc:rcx  gcc:rdi  x86:[ebp+08h]  Pointer to the jump buffer.
; @param    rc       msc:rdx  gcc:rsi  x86:[ebp+0ch]  The return code.
;
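; Rough usage sketch (illustrative only; names other than the documented
; parameters are placeholders):
;
;     /* Make vmmR0CallHostSetJmp return rcRing3 to its caller; execution
;        continues after this call, with VINF_SUCCESS, once the setjmp code
;        resumes the saved stack. */
;     int rc = vmmR0CallHostLongJmp(&JmpBuf, rcRing3);
;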
BEGINPROC vmmR0CallHostLongJmp
%ifdef RT_ARCH_X86
        ;
        ; Save the registers on the stack.
        ;
        push    ebp
        mov     ebp, esp
        push    edi
        push    esi
        push    ebx
        pushf
%ifdef VBOX_STRICT
        push    dword 0f00dbed0h
%endif

        ;
        ; Load parameters.
        ;
        mov     edx, [ebp + 08h]                ; pJmpBuf
        mov     eax, [ebp + 0ch]                ; rc

        ;
        ; Is the jump buffer armed?
        ;
        cmp     dword [edx + VMMR0JMPBUF.eip], byte 0
        je      .nok

        ;
        ; Sanity checks.
        ;
        mov     edi, [edx + VMMR0JMPBUF.pvSavedStack]
        test    edi, edi                        ; darwin may set this to 0.
        jz      .nok
        mov     [edx + VMMR0JMPBUF.SpResume], esp
%ifndef VMM_R0_SWITCH_STACK
        mov     esi, esp
        mov     ecx, [edx + VMMR0JMPBUF.esp]
        sub     ecx, esi

        ; two sanity checks on the size.
        cmp     ecx, 8192                       ; check max size.
        jnbe    .nok

        ;
        ; Copy the stack.
        ;
        test    ecx, 3                          ; check alignment
        jnz     .nok
        mov     [edx + VMMR0JMPBUF.cbSavedStack], ecx
        shr     ecx, 2
        rep movsd
%endif ; !VMM_R0_SWITCH_STACK

        ; store the last pieces of info.
        mov     ecx, [edx + VMMR0JMPBUF.esp]
        mov     [edx + VMMR0JMPBUF.SpCheck], ecx
        mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 1

        ;
        ; Do the long jump.
        ;
        mov     ebx, [edx + VMMR0JMPBUF.ebx]
        mov     esi, [edx + VMMR0JMPBUF.esi]
        mov     edi, [edx + VMMR0JMPBUF.edi]
        mov     ebp, [edx + VMMR0JMPBUF.ebp]
        mov     ecx, [edx + VMMR0JMPBUF.eip]
        mov     esp, [edx + VMMR0JMPBUF.esp]
        jmp     ecx

        ;
        ; Failure
        ;
.nok:
%ifdef VBOX_STRICT
        pop     eax                             ; magic
        cmp     eax, 0f00dbed0h
        je      .magic_ok
        mov     ecx, 0123h
        mov     [ecx], edx                      ; write to a bogus address so we fault here on a bad magic (strict builds).
.magic_ok:
%endif
        popf
        pop     ebx
        pop     esi
        pop     edi
        mov     eax, VERR_INTERNAL_ERROR
        leave
        ret
%endif ; RT_ARCH_X86

%ifdef RT_ARCH_AMD64
        ;
        ; Save the registers on the stack.
        ;
        push    rbp
        mov     rbp, rsp
        push    r15
        push    r14
        push    r13
        push    r12
 %ifdef ASM_CALL64_MSC
        push    rdi
        push    rsi
 %endif
        push    rbx
        pushf

        ;
        ; Normalize the parameters.
        ;
 %ifdef ASM_CALL64_MSC
        mov     eax, edx                        ; rc
        mov     rdx, rcx                        ; pJmpBuf
 %else
        mov     rdx, rdi                        ; pJmpBuf
        mov     eax, esi                        ; rc
 %endif

        ;
        ; Is the jump buffer armed?
        ;
        cmp     qword [rdx + VMMR0JMPBUF.rip], byte 0
        je      .nok

        ;
        ; Save the stack.
        ;
        mov     rdi, [rdx + VMMR0JMPBUF.pvSavedStack]
        test    rdi, rdi                        ; darwin may set this to 0.
        jz      .nok
        mov     [rdx + VMMR0JMPBUF.SpResume], rsp
        mov     rsi, rsp
        mov     rcx, [rdx + VMMR0JMPBUF.rsp]
        sub     rcx, rsi

        ; two sanity checks on the size.
        cmp     rcx, 8192                       ; check max size.
        jbe     .ok
.nok:
        mov     eax, VERR_INTERNAL_ERROR
        popf
        pop     rbx
 %ifdef ASM_CALL64_MSC
        pop     rsi
        pop     rdi
 %endif
        pop     r12
        pop     r13
        pop     r14
        pop     r15
        leave
        ret

.ok:
        test    ecx, 7                          ; check alignment
        jnz     .nok
        mov     [rdx + VMMR0JMPBUF.cbSavedStack], ecx
        shr     ecx, 3
        rep movsq

        ; store the last pieces of info.
        mov     rcx, [rdx + VMMR0JMPBUF.rsp]
        mov     [rdx + VMMR0JMPBUF.SpCheck], rcx
        mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 1

        ;
        ; Do the long jump.
        ;
        mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rdx + VMMR0JMPBUF.rsi]
        mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
        mov     r12, [rdx + VMMR0JMPBUF.r12]
        mov     r13, [rdx + VMMR0JMPBUF.r13]
        mov     r14, [rdx + VMMR0JMPBUF.r14]
        mov     r15, [rdx + VMMR0JMPBUF.r15]
        mov     rbp, [rdx + VMMR0JMPBUF.rbp]
        mov     rcx, [rdx + VMMR0JMPBUF.rip]
        mov     rsp, [rdx + VMMR0JMPBUF.rsp]
        jmp     rcx
%endif
ENDPROC vmmR0CallHostLongJmp


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
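; On x86 this simply forwards to RTLogLogger, pushing an extra zero argument
; first (the default logger instance, per the comment below).  The other
; architectures use C99 variadic macros instead, so this wrapper should never
; be reached there and only contains int3 traps.
;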
EXPORTEDNAME vmmR0LoggerWrapper
%ifdef RT_ARCH_X86 ; The other architecture(s) use(s) C99 variadic macros.
        push    0                               ; assumes we're the wrapper for a default instance.
        call    NAME(RTLogLogger)
        add     esp, byte 4
        ret
%else
        int3                                    ; should never be reached on this architecture.
        int3
        int3
        ret
%endif
ENDPROC vmmR0LoggerWrapper
