VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm@ 92386

Last change on this file since 92386 was 91806, checked in by vboxsync, 3 years ago

SUPDrv,tstVMMR0CallHost-2: Use the argument count from the function table for generating more optimal wrappers. Too bad we cannot make clang verify the counts, so adding a +2 for safety. bugref:10124

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 16.1 KB
1; $Id: VMMR0JmpA-amd64.asm 91806 2021-10-18 08:32:39Z vboxsync $
2;; @file
3; VMM - R0 SetJmp / LongJmp routines for AMD64.
4;
5
6;
7; Copyright (C) 2006-2020 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%define RT_ASM_WITH_SEH64_ALT
22%include "VBox/asmdefs.mac"
23%include "VMMInternal.mac"
24%include "VBox/err.mac"
25%include "VBox/param.mac"
26%ifdef VMM_R0_SWITCH_STACK
27 %include "VBox/SUPR0StackWrapper.mac"
28%endif
29
30
31;*******************************************************************************
32;* Defined Constants And Macros *
33;*******************************************************************************
34%define RESUME_MAGIC 07eadf00dh
35%define STACK_PADDING 0eeeeeeeeeeeeeeeeh
36
37;; Workaround for linux 4.6 fast/slow syscall stack depth difference.
38;; Update: This got worse with linux 5.13 and CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT.
39;; The x86 arch_exit_to_user_mode_prepare code limits the offset to 255,
40;; while the generic limit is 1023. See bugref:10064 for details.
41%ifdef VMM_R0_SWITCH_STACK
42 %define STACK_FUZZ_SIZE 0
43%else
44 %ifdef RT_OS_LINUX
45 %define STACK_FUZZ_SIZE 384
46 %else
47 %define STACK_FUZZ_SIZE 128
48 %endif
49%endif
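; Sizing rationale (inferred from the notes above, not stated in the original):
; STACK_FUZZ_SIZE widens the window the resume path will accept between the RSP
; recorded at setjmp time and the RSP seen when we are re-entered.  On Linux the
; randomized kstack offset can shift the entry stack by up to 255 bytes per
; syscall, so 384 comfortably covers that plus the older fast/slow syscall delta;
; 128 suffices elsewhere.  When we switch to our own stack the kernel stack depth
; no longer matters, hence 0.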
50
51
52BEGINCODE
53
54
55;;
56; The setjmp variant used for calling Ring-3.
57;
58; This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
 59; in the middle of a ring-3 call. Another difference is the function pointer and
60; argument. This has to do with resuming code and the stack frame of the caller.
61;
62; @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
63; @param pJmpBuf msc:rcx gcc:rdi x86:[esp+0x04] Our jmp_buf.
64; @param pfn msc:rdx gcc:rsi x86:[esp+0x08] The function to be called when not resuming.
 65; @param pvUser1 msc:r8 gcc:rdx x86:[esp+0x0c] The first argument of that function.
 66; @param pvUser2 msc:r9 gcc:rcx x86:[esp+0x10] The second argument of that function.
67;
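; A rough C-level view of the contract above (illustrative only; the real
; declarations live in VMMInternal.h and may differ in typing):
;
;   /* Arms pJmpBuf and calls pfn(pvUser1, pvUser2), returning pfn's status, or
;      the rc passed to vmmR0CallRing3LongJmp if a long jump is taken.  If
;      pJmpBuf already records an interrupted ring-3 call for the very same
;      pfn/pvUser1/pvUser2, the saved stack is restored instead and execution
;      resumes right after the vmmR0CallRing3LongJmp that went to ring-3. */
;   int vmmR0CallRing3SetJmp(VMMR0JMPBUF *pJmpBuf,
;                            int (*pfn)(void *pvUser1, void *pvUser2),
;                            void *pvUser1, void *pvUser2);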
68GLOBALNAME vmmR0CallRing3SetJmp2
69GLOBALNAME vmmR0CallRing3SetJmpEx
70BEGINPROC vmmR0CallRing3SetJmp
71 ;
72 ; Save the registers.
73 ;
74 push rbp
75 SEH64_PUSH_xBP
76 mov rbp, rsp
77 SEH64_SET_FRAME_xBP 0
78 %ifdef ASM_CALL64_MSC
79 sub rsp, 30h + STACK_FUZZ_SIZE ; (10h is used by resume (??), 20h for callee spill area)
80 SEH64_ALLOCATE_STACK 30h + STACK_FUZZ_SIZE
81SEH64_END_PROLOGUE
82 mov r11, rdx ; pfn
83 mov rdx, rcx ; pJmpBuf;
84 %else
85 sub rsp, 10h + STACK_FUZZ_SIZE ; (10h is used by resume (??))
86 SEH64_ALLOCATE_STACK 10h + STACK_FUZZ_SIZE
87SEH64_END_PROLOGUE
88 mov r8, rdx ; pvUser1 (save it like MSC)
89 mov r9, rcx ; pvUser2 (save it like MSC)
90 mov r11, rsi ; pfn
91 mov rdx, rdi ; pJmpBuf
92 %endif
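 ; From here on both calling conventions have been normalized to the same
 ; register assignment: r11 = pfn, xDX = pJmpBuf, r8 = pvUser1, r9 = pvUser2.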
93 mov [xDX + VMMR0JMPBUF.rbx], rbx
94 %ifdef ASM_CALL64_MSC
95 mov [xDX + VMMR0JMPBUF.rsi], rsi
96 mov [xDX + VMMR0JMPBUF.rdi], rdi
97 %endif
98 mov [xDX + VMMR0JMPBUF.rbp], rbp
99 mov [xDX + VMMR0JMPBUF.r12], r12
100 mov [xDX + VMMR0JMPBUF.r13], r13
101 mov [xDX + VMMR0JMPBUF.r14], r14
102 mov [xDX + VMMR0JMPBUF.r15], r15
103 mov xAX, [rbp + 8] ; (not really necessary, except for validity check)
104 mov [xDX + VMMR0JMPBUF.rip], xAX
105 %ifdef ASM_CALL64_MSC
106 lea r10, [rsp + 20h] ; must save the spill area
107 %else
108 lea r10, [rsp]
109 %endif
110 mov [xDX + VMMR0JMPBUF.rsp], r10
111 %ifdef RT_OS_WINDOWS
112 movdqa [xDX + VMMR0JMPBUF.xmm6], xmm6
113 movdqa [xDX + VMMR0JMPBUF.xmm7], xmm7
114 movdqa [xDX + VMMR0JMPBUF.xmm8], xmm8
115 movdqa [xDX + VMMR0JMPBUF.xmm9], xmm9
116 movdqa [xDX + VMMR0JMPBUF.xmm10], xmm10
117 movdqa [xDX + VMMR0JMPBUF.xmm11], xmm11
118 movdqa [xDX + VMMR0JMPBUF.xmm12], xmm12
119 movdqa [xDX + VMMR0JMPBUF.xmm13], xmm13
120 movdqa [xDX + VMMR0JMPBUF.xmm14], xmm14
121 movdqa [xDX + VMMR0JMPBUF.xmm15], xmm15
122 %endif
123 pushf
124 pop xAX
125 mov [xDX + VMMR0JMPBUF.rflags], xAX
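 ; The jump buffer now captures everything needed to unwind back to this frame:
 ; the callee-saved GPRs (rbx, rbp, r12-r15, plus rsi/rdi for the MSC convention),
 ; xmm6-xmm15 on Windows, the caller's return RIP, the RSP to unwind to (taken
 ; above the 20h spill area on MSC so that area is covered by stack saves), and RFLAGS.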
126
127 ;
128 ; If we're not in a ring-3 call, call pfn and return.
129 ;
130 test byte [xDX + VMMR0JMPBUF.fInRing3Call], 1
131 jnz .resume
132
133.different_call_continue:
134 mov [xDX + VMMR0JMPBUF.pfn], r11
135 mov [xDX + VMMR0JMPBUF.pvUser1], r8
136 mov [xDX + VMMR0JMPBUF.pvUser2], r9
137
138 %ifdef VMM_R0_SWITCH_STACK
139 mov r15, [xDX + VMMR0JMPBUF.pvSavedStack]
140 test r15, r15
141 jz .entry_error
142 %ifdef VBOX_STRICT
143 cmp dword [r15], 0h
144 jne .entry_error
145 mov rdi, r15
146 mov rcx, VMM_STACK_SIZE / 8
147 mov rax, qword 0eeeeeeeffeeeeeeeh
148 repne stosq
149 mov [rdi - 10h], rbx
150 %endif
151
152 ; New RSP
153 %ifdef WITHOUT_SUPR0STACKINFO
154 lea r15, [r15 + VMM_STACK_SIZE]
155 %else
156 lea r15, [r15 + VMM_STACK_SIZE - SUPR0STACKINFO_size]
157
158 ; Plant SUPR0 stack info.
159 mov [r15 + SUPR0STACKINFO.pResumeKernelStack], rsp
160 mov [r15 + SUPR0STACKINFO.pSelf], r15
161 mov dword [r15 + SUPR0STACKINFO.magic0], SUPR0STACKINFO_MAGIC0
162 mov dword [r15 + SUPR0STACKINFO.magic1], SUPR0STACKINFO_MAGIC1
163 mov dword [r15 + SUPR0STACKINFO.magic2], SUPR0STACKINFO_MAGIC2
164 mov dword [r15 + SUPR0STACKINFO.magic3], SUPR0STACKINFO_MAGIC3
165
166 %endif
167
168 ; Switch stack!
169 %ifdef ASM_CALL64_MSC
170 lea rsp, [r15 - 20h]
171 %else
172 mov rsp, r15
173 %endif
174 %endif ; VMM_R0_SWITCH_STACK
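 ; Stack layout after the switch (VMM_R0_SWITCH_STACK builds): pvSavedStack is
 ; the base of a private VMM_STACK_SIZE byte stack.  Unless WITHOUT_SUPR0STACKINFO
 ; is defined, the top SUPR0STACKINFO_size bytes hold the info block planted above
 ; (the magics, pSelf, and pResumeKernelStack pointing back at the kernel stack),
 ; and the new RSP sits just below it (an extra 20h lower for the MSC spill area).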
175
176 mov r12, rdx ; Save pJmpBuf.
177 %ifdef ASM_CALL64_MSC
178 mov rcx, r8 ; pvUser -> arg0
179 mov rdx, r9
180 %else
181 mov rdi, r8 ; pvUser -> arg0
182 mov rsi, r9
183 %endif
184 call r11
185 mov rdx, r12 ; Restore pJmpBuf
186
187 %ifdef VMM_R0_SWITCH_STACK
188 ; Reset the debug mark and the stack info header.
189 mov r15, [xDX + VMMR0JMPBUF.pvSavedStack]
190 %ifndef WITHOUT_SUPR0STACKINFO
191 mov qword [r15 + VMM_STACK_SIZE - SUPR0STACKINFO_size + SUPR0STACKINFO.magic0], 0h
192 %endif
193 %ifdef VBOX_STRICT
194 mov dword [r15], 0h ; Reset the marker
195 %endif
196 %endif
197
198 ;
 199 ; Return like in the long jump but clear rip, no shortcuts here.
200 ;
201.proper_return:
202%ifdef RT_OS_WINDOWS
203 movdqa xmm6, [xDX + VMMR0JMPBUF.xmm6 ]
204 movdqa xmm7, [xDX + VMMR0JMPBUF.xmm7 ]
205 movdqa xmm8, [xDX + VMMR0JMPBUF.xmm8 ]
206 movdqa xmm9, [xDX + VMMR0JMPBUF.xmm9 ]
207 movdqa xmm10, [xDX + VMMR0JMPBUF.xmm10]
208 movdqa xmm11, [xDX + VMMR0JMPBUF.xmm11]
209 movdqa xmm12, [xDX + VMMR0JMPBUF.xmm12]
210 movdqa xmm13, [xDX + VMMR0JMPBUF.xmm13]
211 movdqa xmm14, [xDX + VMMR0JMPBUF.xmm14]
212 movdqa xmm15, [xDX + VMMR0JMPBUF.xmm15]
213%endif
214 mov rbx, [xDX + VMMR0JMPBUF.rbx]
215%ifdef ASM_CALL64_MSC
216 mov rsi, [xDX + VMMR0JMPBUF.rsi]
217 mov rdi, [xDX + VMMR0JMPBUF.rdi]
218%endif
219 mov r12, [xDX + VMMR0JMPBUF.r12]
220 mov r13, [xDX + VMMR0JMPBUF.r13]
221 mov r14, [xDX + VMMR0JMPBUF.r14]
222 mov r15, [xDX + VMMR0JMPBUF.r15]
223 mov rbp, [xDX + VMMR0JMPBUF.rbp]
 224 and qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for the validity check.
225 mov rsp, [xDX + VMMR0JMPBUF.rsp]
226 push qword [xDX + VMMR0JMPBUF.rflags]
227 popf
228 leave
229 ret
230
231.entry_error:
232 mov eax, VERR_VMM_SET_JMP_ERROR
233 jmp .proper_return
234
235.stack_overflow:
236 mov eax, VERR_VMM_SET_JMP_STACK_OVERFLOW
237 jmp .proper_return
238
239 ;
240 ; Aborting resume.
241 ; Note! No need to restore XMM registers here since we haven't touched them yet.
242 ;
243.bad:
 244 and qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for the validity check.
245 mov rbx, [xDX + VMMR0JMPBUF.rbx]
246 %ifdef ASM_CALL64_MSC
247 mov rsi, [xDX + VMMR0JMPBUF.rsi]
248 mov rdi, [xDX + VMMR0JMPBUF.rdi]
249 %endif
250 mov r12, [xDX + VMMR0JMPBUF.r12]
251 mov r13, [xDX + VMMR0JMPBUF.r13]
252 mov r14, [xDX + VMMR0JMPBUF.r14]
253 mov r15, [xDX + VMMR0JMPBUF.r15]
254 mov eax, VERR_VMM_SET_JMP_ABORTED_RESUME
255 leave
256 ret
257
258 ;
 259 ; Not the same call as the one that went to ring-3.
260 ;
261.different_call:
262 mov byte [xDX + VMMR0JMPBUF.fInRing3Call], 0
263 ;; @todo or should we fail here instead?
264 jmp .different_call_continue
265
266 ;
 267 ; Resume the interrupted VMMRZCallRing3 call.
268 ;
269.resume:
 270 ; Check if it's actually the same call; if not, just continue with it
271 ; as a regular call (ring-0 assert, then VM destroy).
272 cmp [xDX + VMMR0JMPBUF.pfn], r11
273 jne .different_call
274 cmp [xDX + VMMR0JMPBUF.pvUser1], r8
275 jne .different_call
276 cmp [xDX + VMMR0JMPBUF.pvUser2], r9
277 jne .different_call
278
279 %ifndef VMM_R0_SWITCH_STACK
 280 ; Sanity-check the incoming stack, applying fuzz if needed.
281 sub r10, [xDX + VMMR0JMPBUF.SpCheck]
282 jz .resume_stack_checked_out
283 add r10, STACK_FUZZ_SIZE ; plus/minus STACK_FUZZ_SIZE is fine.
284 cmp r10, STACK_FUZZ_SIZE * 2
285 ja .bad
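 ; The add/cmp/ja sequence implements an unsigned window check: with
 ; delta = r10 - SpCheck, requiring delta + STACK_FUZZ_SIZE <= 2 * STACK_FUZZ_SIZE
 ; accepts any delta in [-STACK_FUZZ_SIZE, +STACK_FUZZ_SIZE] with a single branch.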
286
287 mov r10, [xDX + VMMR0JMPBUF.SpCheck]
 288 mov [xDX + VMMR0JMPBUF.rsp], r10 ; Must be updated in case of another long jump (used for save calc).
289
290.resume_stack_checked_out:
291 mov ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
292 cmp rcx, VMM_STACK_SIZE
293 ja .bad
294 test rcx, 7
295 jnz .bad
296 mov rdi, [xDX + VMMR0JMPBUF.SpCheck]
297 sub rdi, [xDX + VMMR0JMPBUF.SpResume]
298 cmp rcx, rdi
299 jne .bad
300 %endif
301
302%ifdef VMM_R0_SWITCH_STACK
303 ; Update the signature in case the kernel stack moved.
304 mov r15, [xDX + VMMR0JMPBUF.pvSavedStack]
305 test r15, r15
306 jz .entry_error
307 %ifndef WITHOUT_SUPR0STACKINFO
308 lea r15, [r15 + VMM_STACK_SIZE - SUPR0STACKINFO_size]
309
310 mov [r15 + SUPR0STACKINFO.pResumeKernelStack], rsp
311 mov [r15 + SUPR0STACKINFO.pSelf], r15
312 mov dword [r15 + SUPR0STACKINFO.magic0], SUPR0STACKINFO_MAGIC0
313 mov dword [r15 + SUPR0STACKINFO.magic1], SUPR0STACKINFO_MAGIC1
314 mov dword [r15 + SUPR0STACKINFO.magic2], SUPR0STACKINFO_MAGIC2
315 mov dword [r15 + SUPR0STACKINFO.magic3], SUPR0STACKINFO_MAGIC3
316 %endif
317
318 ; Switch stack.
319 mov rsp, [xDX + VMMR0JMPBUF.SpResume]
320%else
321 ; Restore the stack.
322 mov ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
323 shr ecx, 3
324 mov rsi, [xDX + VMMR0JMPBUF.pvSavedStack]
325 mov rdi, [xDX + VMMR0JMPBUF.SpResume]
326 mov rsp, rdi
327 rep movsq
328%endif ; !VMM_R0_SWITCH_STACK
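 ; In the non-stack-switching case the resume rebuilds the frame saved by
 ; vmmR0CallRing3LongJmp: cbSavedStack bytes are copied from pvSavedStack back to
 ; their original location starting at SpResume, leaving RSP exactly where it was
 ; when the long jump was taken.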
329 mov byte [xDX + VMMR0JMPBUF.fInRing3Call], 0
330
331 ;
332 ; Continue where we left off.
333 ;
334%ifdef VBOX_STRICT
335 pop rax ; magic
336 cmp rax, RESUME_MAGIC
337 je .magic_ok
338 mov ecx, 0123h
339 mov [ecx], edx
340.magic_ok:
341%endif
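; (The write through ecx = 0123h above is presumably a deliberate fault: if the
; RESUME_MAGIC cookie pushed by vmmR0CallRing3LongJmp is missing, the restored
; stack is corrupt and crashing here is preferable to continuing.)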
342%ifdef RT_OS_WINDOWS
343 movdqa xmm6, [rsp + 000h]
344 movdqa xmm7, [rsp + 010h]
345 movdqa xmm8, [rsp + 020h]
346 movdqa xmm9, [rsp + 030h]
347 movdqa xmm10, [rsp + 040h]
348 movdqa xmm11, [rsp + 050h]
349 movdqa xmm12, [rsp + 060h]
350 movdqa xmm13, [rsp + 070h]
351 movdqa xmm14, [rsp + 080h]
352 movdqa xmm15, [rsp + 090h]
353 add rsp, 0a0h
354%endif
355 popf
356 pop rbx
357%ifdef ASM_CALL64_MSC
358 pop rsi
359 pop rdi
360%endif
361 pop r12
362 pop r13
363 pop r14
364 pop r15
365 pop rbp
366 xor eax, eax ; VINF_SUCCESS
367 ret
368ENDPROC vmmR0CallRing3SetJmp
369
370
371;;
372; Worker for VMMRZCallRing3.
373; This will save the stack and registers.
374;
375; @param pJmpBuf msc:rcx gcc:rdi x86:[ebp+8] Pointer to the jump buffer.
376; @param rc msc:rdx gcc:rsi x86:[ebp+c] The return code.
377;
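; Rough C-level counterpart (illustrative only; see VMMInternal.h for the real
; declaration):
;
;   /* Saves the current stack and non-volatile registers into pJmpBuf and
;      unwinds to the matching vmmR0CallRing3SetJmp frame, which then returns rc.
;      Returns only on failure (VERR_VMM_LONG_JMP_ERROR), e.g. when the jump
;      buffer is not armed. */
;   int vmmR0CallRing3LongJmp(VMMR0JMPBUF *pJmpBuf, int rc);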
378BEGINPROC vmmR0CallRing3LongJmp
379 ;
380 ; Save the registers on the stack.
381 ;
382 push rbp
383 SEH64_PUSH_xBP
384 mov rbp, rsp
385 SEH64_SET_FRAME_xBP 0
386 push r15
387 SEH64_PUSH_GREG r15
388 push r14
389 SEH64_PUSH_GREG r14
390 push r13
391 SEH64_PUSH_GREG r13
392 push r12
393 SEH64_PUSH_GREG r12
394%ifdef ASM_CALL64_MSC
395 push rdi
396 SEH64_PUSH_GREG rdi
397 push rsi
398 SEH64_PUSH_GREG rsi
399%endif
400 push rbx
401 SEH64_PUSH_GREG rbx
402 pushf
403 SEH64_ALLOCATE_STACK 8
404%ifdef RT_OS_WINDOWS
405 sub rsp, 0a0h
406 SEH64_ALLOCATE_STACK 0a0h
407 movdqa [rsp + 000h], xmm6
408 movdqa [rsp + 010h], xmm7
409 movdqa [rsp + 020h], xmm8
410 movdqa [rsp + 030h], xmm9
411 movdqa [rsp + 040h], xmm10
412 movdqa [rsp + 050h], xmm11
413 movdqa [rsp + 060h], xmm12
414 movdqa [rsp + 070h], xmm13
415 movdqa [rsp + 080h], xmm14
416 movdqa [rsp + 090h], xmm15
417%endif
418%ifdef VBOX_STRICT
419 push RESUME_MAGIC
420 SEH64_ALLOCATE_STACK 8
421%endif
422SEH64_END_PROLOGUE
423
424 ;
425 ; Normalize the parameters.
426 ;
427%ifdef ASM_CALL64_MSC
428 mov eax, edx ; rc
429 mov rdx, rcx ; pJmpBuf
430%else
431 mov rdx, rdi ; pJmpBuf
432 mov eax, esi ; rc
433%endif
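 ; Both conventions now agree: eax = rc, xDX = pJmpBuf.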
434
435 ;
436 ; Is the jump buffer armed?
437 ;
438 cmp qword [xDX + VMMR0JMPBUF.rip], byte 0
439 je .nok
440
441 ;
442 ; Sanity checks.
443 ;
444 mov rdi, [xDX + VMMR0JMPBUF.pvSavedStack]
445 test rdi, rdi ; darwin may set this to 0.
446 jz .nok
447 mov [xDX + VMMR0JMPBUF.SpResume], rsp
448 %ifndef VMM_R0_SWITCH_STACK
449 mov rsi, rsp
450 mov rcx, [xDX + VMMR0JMPBUF.rsp]
451 sub rcx, rsi
452
453 ; two sanity checks on the size.
454 cmp rcx, VMM_STACK_SIZE ; check max size.
455 jnbe .nok
456
457 ;
458 ; Copy the stack
459 ;
460 test ecx, 7 ; check alignment
461 jnz .nok
462 mov [xDX + VMMR0JMPBUF.cbSavedStack], ecx
463 shr ecx, 3
464 rep movsq
465
466 %endif ; !VMM_R0_SWITCH_STACK
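 ; At this point (non-stack-switching builds) everything between the current RSP
 ; and the RSP recorded at setjmp time has been copied into pvSavedStack, and
 ; cbSavedStack tells the resume path in vmmR0CallRing3SetJmp how much to copy back.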
467
468 ; Save a PC and return PC here to assist unwinding.
469.unwind_point:
470 lea rcx, [.unwind_point wrt RIP]
471 mov [xDX + VMMR0JMPBUF.SavedEipForUnwind], rcx
472 mov rcx, [xDX + VMMR0JMPBUF.rbp]
473 lea rcx, [rcx + 8]
474 mov [xDX + VMMR0JMPBUF.UnwindRetPcLocation], rcx
475 mov rcx, [rcx]
476 mov [xDX + VMMR0JMPBUF.UnwindRetPcValue], rcx
477
478 ; Save RSP & RBP to enable stack dumps
479 mov rcx, rbp
480 mov [xDX + VMMR0JMPBUF.SavedEbp], rcx
481 sub rcx, 8
482 mov [xDX + VMMR0JMPBUF.SavedEsp], rcx
483
484 ; store the last pieces of info.
485 mov rcx, [xDX + VMMR0JMPBUF.rsp]
486 mov [xDX + VMMR0JMPBUF.SpCheck], rcx
487 mov byte [xDX + VMMR0JMPBUF.fInRing3Call], 1
488
489 ;
490 ; Do the long jump.
491 ;
492%ifdef RT_OS_WINDOWS
493 movdqa xmm6, [xDX + VMMR0JMPBUF.xmm6 ]
494 movdqa xmm7, [xDX + VMMR0JMPBUF.xmm7 ]
495 movdqa xmm8, [xDX + VMMR0JMPBUF.xmm8 ]
496 movdqa xmm9, [xDX + VMMR0JMPBUF.xmm9 ]
497 movdqa xmm10, [xDX + VMMR0JMPBUF.xmm10]
498 movdqa xmm11, [xDX + VMMR0JMPBUF.xmm11]
499 movdqa xmm12, [xDX + VMMR0JMPBUF.xmm12]
500 movdqa xmm13, [xDX + VMMR0JMPBUF.xmm13]
501 movdqa xmm14, [xDX + VMMR0JMPBUF.xmm14]
502 movdqa xmm15, [xDX + VMMR0JMPBUF.xmm15]
503%endif
504 mov rbx, [xDX + VMMR0JMPBUF.rbx]
505%ifdef ASM_CALL64_MSC
506 mov rsi, [xDX + VMMR0JMPBUF.rsi]
507 mov rdi, [xDX + VMMR0JMPBUF.rdi]
508%endif
509 mov r12, [xDX + VMMR0JMPBUF.r12]
510 mov r13, [xDX + VMMR0JMPBUF.r13]
511 mov r14, [xDX + VMMR0JMPBUF.r14]
512 mov r15, [xDX + VMMR0JMPBUF.r15]
513 mov rbp, [xDX + VMMR0JMPBUF.rbp]
514 mov rsp, [xDX + VMMR0JMPBUF.rsp]
515 push qword [xDX + VMMR0JMPBUF.rflags]
516 popf
517 leave
518 ret
519
520 ;
521 ; Failure
522 ;
523.nok:
524%ifdef VBOX_STRICT
525 pop rax ; magic
526 cmp rax, RESUME_MAGIC
527 je .magic_ok
528 mov ecx, 0123h
529 mov [rcx], edx
530.magic_ok:
531%endif
532 mov eax, VERR_VMM_LONG_JMP_ERROR
533%ifdef RT_OS_WINDOWS
534 add rsp, 0a0h ; skip XMM registers since they are unmodified.
535%endif
536 popf
537 pop rbx
538%ifdef ASM_CALL64_MSC
539 pop rsi
540 pop rdi
541%endif
542 pop r12
543 pop r13
544 pop r14
545 pop r15
546 leave
547 ret
548ENDPROC vmmR0CallRing3LongJmp
549