VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0A.asm @ 11424

Last change on this file since 11424 was 8155, checked in by vboxsync, 17 years ago

The Big Sun Rebranding Header Change

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 11.8 KB
; $Id: VMMR0A.asm 8155 2008-04-18 15:16:47Z vboxsync $
;; @file
; VMM - R0 assembly routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "iprt/err.mac"


%ifdef RT_ARCH_X86 ; The other architecture(s) use(s) C99 variadic macros.
extern IMPNAME(RTLogLogger)
%endif


BEGINCODE


;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMR0CallHost if we're
; in the middle of a ring-3 call. Another difference is the function pointer and
; argument. This has to do with resuming code and the stack frame of the caller.
;
; @returns  VINF_SUCCESS on success or whatever is passed to vmmR0CallHostLongJmp.
; @param    pJmpBuf     msc:rcx gcc:rdi x86:[esp+4]     Our jmp_buf.
; @param    pfn         msc:rdx gcc:rsi x86:[esp+8]     The function to be called when not resuming.
; @param    pvUser      msc:r8  gcc:rdx x86:[esp+c]     The argument of that function.
;
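;
; Rough C-level sketch of how this pairs with vmmR0CallHostLongJmp below
; (illustrative only; the real caller lives in the ring-0 C code and the jump
; buffer member name is an assumption):
;
;       rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, pfnWorker, pVM);
;
; On a fresh call fInRing3Call is clear, so pfnWorker(pVM) is invoked and its
; status is returned.  If the worker ends up in vmmR0CallHostLongJmp, the ring-0
; stack is saved and control goes back to ring-3; when ring-0 is re-entered and
; this function is called again with fInRing3Call set, the saved stack is
; restored and execution resumes as if vmmR0CallHostLongJmp had just returned
; VINF_SUCCESS.
;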
BEGINPROC vmmR0CallHostSetJmp
GLOBALNAME vmmR0CallHostSetJmpEx
%ifdef RT_ARCH_X86
    ;
    ; Save the registers.
    ;
    mov     edx, [esp + 4h]             ; pJmpBuf
    mov     [edx + VMMR0JMPBUF.ebx], ebx
    mov     [edx + VMMR0JMPBUF.esi], esi
    mov     [edx + VMMR0JMPBUF.edi], edi
    mov     [edx + VMMR0JMPBUF.ebp], ebp
    mov     eax, [esp]
    mov     [edx + VMMR0JMPBUF.eip], eax
    lea     ecx, [esp + 4]              ; (used in resume)
    mov     [edx + VMMR0JMPBUF.esp], ecx

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [edx + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

    mov     ecx, [esp + 0ch]            ; pvUser
    mov     eax, [esp + 08h]            ; pfn
    sub     esp, 12                     ; align the stack on a 16-byte boundary.
    mov     [esp], ecx
    call    eax
    add     esp, 12
    mov     edx, [esp + 4h]             ; pJmpBuf

    ; restore the registers that we're not allowed to modify
    ; otherwise a resume might restore the wrong values (from the previous run)
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     ebp, [edx + VMMR0JMPBUF.ebp]

    and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
    ret

    ;
    ; Resume the VMMR0CallHost call.
    ;
.resume:
    ; Sanity checks.
    cmp     ecx, [edx + VMMR0JMPBUF.SpCheck]
    je      .espCheck_ok
.bad:
    and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     eax, VERR_INTERNAL_ERROR    ; @todo better return code!
    ret

.espCheck_ok:
    mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
    cmp     ecx, 8192
    ja      .bad
    test    ecx, 3
    jnz     .bad
    mov     edi, [edx + VMMR0JMPBUF.esp]
    sub     edi, [edx + VMMR0JMPBUF.SpResume]
    cmp     ecx, edi
    jne     .bad

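    ; The checks above guarantee that the saved stack is no larger than 8 KB,
    ; dword aligned, and spans exactly from SpResume up to the esp recorded
    ; when the jump buffer was armed.
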
    ;
    ; Restore the stack.
    ;
    mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 0
    mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 2
    mov     esi, [edx + VMMR0JMPBUF.pvSavedStack]
    mov     edi, [edx + VMMR0JMPBUF.SpResume]
    mov     esp, edi
    rep movsd

    ;
    ; Continue where we left off.
    ;
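    ; The pops below mirror the pushes at the top of vmmR0CallHostLongJmp (flags
    ; pushed last, ebp first), so this unwinds its frame and the final ret returns
    ; to its caller with eax = VINF_SUCCESS.  The AMD64 resume code further down
    ; works the same way.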
    popf
    pop     ebx
    pop     esi
    pop     edi
    pop     ebp
    xor     eax, eax                    ; VINF_SUCCESS
    ret
%endif ; RT_ARCH_X86

%ifdef RT_ARCH_AMD64
    ;
    ; Save the registers.
    ;
    push    rbp
    mov     rbp, rsp
 %ifdef ASM_CALL64_MSC
    sub     rsp, 30h
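    ; (30h presumably covers the 20h Win64 shadow space for the call below, the
    ;  8-byte spill slot at [rbp - 8] and 16-byte stack alignment.)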
    mov     r11, rdx                    ; pfn
    mov     rdx, rcx                    ; pJmpBuf
 %else
    sub     rsp, 10h
    mov     r8, rdx                     ; pvUser (save it like MSC)
    mov     r11, rsi                    ; pfn
    mov     rdx, rdi                    ; pJmpBuf
 %endif
    mov     [rdx + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
    mov     [rdx + VMMR0JMPBUF.rsi], rsi
    mov     [rdx + VMMR0JMPBUF.rdi], rdi
 %endif
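    ; (rsi and rdi are callee-saved only in the Microsoft x64 calling convention,
    ;  hence the ASM_CALL64_MSC guards here and in the restore paths below.)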
    mov     r10, [rbp]
    mov     [rdx + VMMR0JMPBUF.rbp], r10
    mov     [rdx + VMMR0JMPBUF.r12], r12
    mov     [rdx + VMMR0JMPBUF.r13], r13
    mov     [rdx + VMMR0JMPBUF.r14], r14
    mov     [rdx + VMMR0JMPBUF.r15], r15
    mov     rax, [rbp + 8]
    mov     [rdx + VMMR0JMPBUF.rip], rax
    lea     r10, [rbp + 10h]            ; (used in resume)
    mov     [rdx + VMMR0JMPBUF.rsp], r10

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [rdx + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

    mov     [rbp - 8], rdx              ; Save it and fix stack alignment (16).
 %ifdef ASM_CALL64_MSC
    mov     rcx, r8                     ; pvUser -> arg0
 %else
    mov     rdi, r8                     ; pvUser -> arg0
 %endif
    call    r11
    mov     rdx, [rbp - 8]              ; pJmpBuf

    ; restore the registers that we're not allowed to modify
    ; otherwise a resume might restore the wrong values (from the previous run)
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]

    and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    leave
    ret

    ;
    ; Resume the VMMR0CallHost call.
    ;
.resume:
    ; Sanity checks.
    cmp     r10, [rdx + VMMR0JMPBUF.SpCheck]
    je      .rspCheck_ok
.bad:
    and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]
    mov     eax, VERR_INTERNAL_ERROR    ; @todo better return code!
    leave
    ret

.rspCheck_ok:
    mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
    cmp     rcx, 8192
    ja      .bad
    test    rcx, 3
    jnz     .bad
    mov     rdi, [rdx + VMMR0JMPBUF.rsp]
    sub     rdi, [rdx + VMMR0JMPBUF.SpResume]
    cmp     rcx, rdi
    jne     .bad

    ;
    ; Restore the stack.
    ;
    mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 0
    mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 3
    mov     rsi, [rdx + VMMR0JMPBUF.pvSavedStack]
    mov     rdi, [rdx + VMMR0JMPBUF.SpResume]
    mov     rsp, rdi
    rep movsq

    ;
    ; Continue where we left off.
    ;
    popf
    pop     rbx
 %ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
 %endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    pop     rbp
    xor     eax, eax                    ; VINF_SUCCESS
    ret
%endif
ENDPROC vmmR0CallHostSetJmp


;;
; Worker for VMMR0CallHost.
; This will save the stack and registers.
;
; @param    pJmpBuf     msc:rcx gcc:rdi x86:[ebp+8]     Pointer to the jump buffer.
; @param    rc          msc:rdx gcc:rsi x86:[ebp+c]     The return code.
;
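;
; Rough C-level sketch of the intended call site (illustrative only; the real
; caller is the ring-0 VMMR0CallHost code, and the member and enum names used
; here are assumptions):
;
;       /* Ring-0 code that needs something done by ring-3: */
;       pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_PGM_LOCK;
;       rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
;       /* Execution continues here only after ring-3 has serviced the request
;          and ring-0 has been resumed via vmmR0CallHostSetJmp. */
;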
BEGINPROC vmmR0CallHostLongJmp
%ifdef RT_ARCH_X86
    ;
    ; Save the registers on the stack.
    ;
    push    ebp
    mov     ebp, esp
    push    edi
    push    esi
    push    ebx
    pushf

    ;
    ; Load parameters.
    ;
    mov     edx, [ebp + 08h]            ; pJmpBuf
    mov     eax, [ebp + 0ch]            ; rc

    ;
    ; Is the jump buffer armed?
    ;
    cmp     dword [edx + VMMR0JMPBUF.eip], byte 0
    je      .nok
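    ; (A zero eip means the buffer is not armed; vmmR0CallHostSetJmp zeroes it
    ;  when it returns, so a long jump without an active setjmp frame fails
    ;  cleanly with VERR_INTERNAL_ERROR.)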

    ;
    ; Save the stack.
    ;
    mov     edi, [edx + VMMR0JMPBUF.pvSavedStack]
    mov     [edx + VMMR0JMPBUF.SpResume], esp
    mov     esi, esp
    mov     ecx, [edx + VMMR0JMPBUF.esp]
    sub     ecx, esi

    ; two sanity checks on the size.
    cmp     ecx, 8192                   ; check max size.
    jbe     .ok
.nok:
    mov     eax, VERR_INTERNAL_ERROR
    popf
    pop     ebx
    pop     esi
    pop     edi
    leave
    ret
.ok:
    test    ecx, 3                      ; check alignment
    jnz     .nok
    mov     [edx + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 2
    rep movsd
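    ; (The copy above moves everything between the current esp and the esp
    ;  recorded at setjmp time into the pvSavedStack buffer so it can be
    ;  replayed on resume.)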

    ; store the last pieces of info.
    mov     ecx, [edx + VMMR0JMPBUF.esp]
    mov     [edx + VMMR0JMPBUF.SpCheck], ecx
    mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 1

    ;
    ; Do the long jump.
    ;
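    ; (eax still holds rc; restoring the registers, stack and eip recorded by
    ;  vmmR0CallHostSetJmp makes that original setjmp call appear to return rc.)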
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     ebp, [edx + VMMR0JMPBUF.ebp]
    mov     ecx, [edx + VMMR0JMPBUF.eip]
    mov     esp, [edx + VMMR0JMPBUF.esp]
    jmp     ecx
%endif ; RT_ARCH_X86

%ifdef RT_ARCH_AMD64
    ;
    ; Save the registers on the stack.
    ;
    push    rbp
    mov     rbp, rsp
    push    r15
    push    r14
    push    r13
    push    r12
 %ifdef ASM_CALL64_MSC
    push    rdi
    push    rsi
 %endif
    push    rbx
    pushf

    ;
    ; Normalize the parameters.
    ;
 %ifdef ASM_CALL64_MSC
    mov     eax, edx                    ; rc
    mov     rdx, rcx                    ; pJmpBuf
 %else
    mov     rdx, rdi                    ; pJmpBuf
    mov     eax, esi                    ; rc
 %endif

    ;
    ; Is the jump buffer armed?
    ;
    cmp     qword [rdx + VMMR0JMPBUF.rip], byte 0
    je      .nok

    ;
    ; Save the stack.
    ;
    mov     rdi, [rdx + VMMR0JMPBUF.pvSavedStack]
    mov     [rdx + VMMR0JMPBUF.SpResume], rsp
    mov     rsi, rsp
    mov     rcx, [rdx + VMMR0JMPBUF.rsp]
    sub     rcx, rsi

    ; two sanity checks on the size.
    cmp     rcx, 8192                   ; check max size.
    jbe     .ok
.nok:
    mov     eax, VERR_INTERNAL_ERROR
    popf
    pop     rbx
 %ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
 %endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    leave
    ret

.ok:
    test    ecx, 7                      ; check alignment
    jnz     .nok
    mov     [rdx + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 3
    rep movsq

    ; store the last pieces of info.
    mov     rcx, [rdx + VMMR0JMPBUF.rsp]
    mov     [rdx + VMMR0JMPBUF.SpCheck], rcx
    mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 1

    ;
    ; Do the long jump.
    ;
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]
    mov     rbp, [rdx + VMMR0JMPBUF.rbp]
    mov     rcx, [rdx + VMMR0JMPBUF.rip]
    mov     rsp, [rdx + VMMR0JMPBUF.rsp]
    jmp     rcx
%endif
ENDPROC vmmR0CallHostLongJmp


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
EXPORTEDNAME vmmR0LoggerWrapper
%ifdef RT_ARCH_X86 ; The other architecture(s) use(s) C99 variadic macros.
    push    0                           ; assumes we're the wrapper for a default instance.
    call    IMP(RTLogLogger)
    add     esp, byte 4
    ret
%else
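    ; Not used on this architecture: logging goes through the C99 variadic
    ; macros (see the note above), so trap hard if we ever get here.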
    int3
    int3
    int3
    ret
%endif
ENDPROC vmmR0LoggerWrapper
