VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm@60891

Last change on this file since 60891 was 60891, checked in by vboxsync, 9 years ago

TRPM,CPUM: Return to v8086 mode fixes.

  • We may have entered from protected mode and be returning to V8086 mode, so the larger V8086 frame always has to be pushed. TRPM should call CPUMGCCallV86Code instead of duplicating the code.
  • We must clear the valid bit in all the selectors when returning to v8086 mode to make sure we reload the hidden bits the next time we need them, since v8086 and protected mode are very different and our checks could get confused.
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 15.4 KB
; $Id: CPUMRCA.asm 60891 2016-05-09 12:55:50Z vboxsync $
;; @file
; CPUM - Raw-mode Context Assembly Routines.
;

;
; Copyright (C) 2006-2015 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VMMRC.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "VBox/vmm/stam.mac"
%include "CPUMInternal.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"


;*******************************************************************************
;*      External Symbols                                                       *
;*******************************************************************************
extern IMPNAME(g_CPUM)                  ; VMM GC Builtin import
extern IMPNAME(g_VM)                    ; VMM GC Builtin import
extern NAME(cpumRCHandleNPAndGP)        ; CPUMGC.cpp
extern NAME(CPUMRCAssertPreExecutionSanity)


;
; Enables write protection of Hypervisor memory pages.
; !note! Must be commented out for Trap8 debug handler.
;
%define ENABLE_WRITE_PROTECTION 1

BEGINCODE


;;
; Handles lazy FPU saving and restoring.
;
; This handler implements lazy FPU (SSE/MMX/etc.) saving. Because the guest
; OS may itself be doing lazy FPU switching, two actions are possible here:
; either we generate the trap that the guest CPU context calls for according
; to its CR0 flags, or, if no guest trap is due, we save the host context
; and restore the guest context.
;
; @returns  0 if the caller should continue execution.
; @returns  VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
; @param    pCpumCpu  [ebp+8]  Pointer to the CPUMCPU.
;
align 16
BEGINPROC cpumHandleLazyFPUAsm
        push    ebp
        mov     ebp, esp
        push    ebx
        push    esi
        mov     ebx, [ebp + 8]
%define pCpumCpu ebx
%define pXState  esi

        ;
        ; Figure out what to do.
        ;
        ; There are two basic actions:
        ;   1. Save host fpu and restore guest fpu.
        ;   2. Generate guest trap.
        ;
        ; When entering the hypervisor we'll always enable MP (for proper wait
        ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
        ; is taken from the guest OS in order to get proper SSE handling.
        ;
        ;
        ; Actions taken depending on the guest CR0 flags:
        ;
        ;    3    2    1
        ;   TS | EM | MP | FPUInstr | WAIT  :: VMM Action
        ; ------------------------------------------------------------------------
        ;    0 |  0 |  0 | Exec     | Exec  :: Clear TS & MP, Save HC, Load GC.
        ;    0 |  0 |  1 | Exec     | Exec  :: Clear TS, Save HC, Load GC.
        ;    0 |  1 |  0 | #NM      | Exec  :: Clear TS & MP, Save HC, Load GC;
        ;    0 |  1 |  1 | #NM      | Exec  :: Clear TS, Save HC, Load GC.
        ;    1 |  0 |  0 | #NM      | Exec  :: Clear MP, Save HC, Load GC. (EM is already cleared.)
        ;    1 |  0 |  1 | #NM      | #NM   :: Go to host taking trap there.
        ;    1 |  1 |  0 | #NM      | Exec  :: Clear MP, Save HC, Load GC. (EM is already set.)
        ;    1 |  1 |  1 | #NM      | #NM   :: Go to host taking trap there.

        ;
        ; Before taking any of these actions we check whether we have already
        ; loaded the GC FPU; if we have, this is a trap for the guest - raw ring-3.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      hlfpua_not_loaded
        jmp     hlfpua_guest_trap

        ;
        ; Take action.
        ;
align 16
hlfpua_not_loaded:
        mov     eax, [pCpumCpu + CPUMCPU.Guest.cr0]
        and     eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
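        ; eax now holds CR0 & (MP|EM|TS), i.e. one of 0, 2, 4, ..., 14; scaling it
        ; by 2 below turns that into a dword offset (0, 4, ..., 28) into the
        ; 8-entry hlfpuajmp1 and hlfpu_afFlags tables.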
        jmp     dword [eax*2 + hlfpuajmp1]
align 16
;; jump table using fpu related cr0 flags as index.
hlfpuajmp1:
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_guest_trap
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_guest_trap
;; and mask for cr0.
hlfpu_afFlags:
        RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
        RTCCPTR_DEF ~(X86_CR0_TS)
        RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
        RTCCPTR_DEF ~(X86_CR0_TS)
        RTCCPTR_DEF ~(X86_CR0_MP)
        RTCCPTR_DEF 0
        RTCCPTR_DEF ~(X86_CR0_MP)
        RTCCPTR_DEF 0

        ;
        ; Action - switch FPU context and change cr0 flags.
        ;
align 16
hlfpua_switch_fpu_ctx:
        mov     ecx, cr0
        mov     edx, ecx
        and     ecx, [eax*2 + hlfpu_afFlags]   ; Calc the new cr0 flags. Do NOT use ECX until we restore it!
        and     edx, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, edx                       ; Clear flags so we don't trap here.

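        ; Save the host FPU/SSE/AVX state: if the host uses extended state
        ; components (fXStateMask != 0) we use XSAVE, which takes its component
        ; mask in EDX:EAX, otherwise we fall back to FXSAVE.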
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateRC]
        or      eax, eax
        jz      hlfpua_host_fxsave
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
        xsave   [pXState]
        jmp     hlfpua_host_done
hlfpua_host_fxsave:
        fxsave  [pXState]
hlfpua_host_done:

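        ; Load the guest state the same way: XRSTOR with the guest's component
        ; mask in EDX:EAX when extended state is in use, FXRSTOR otherwise.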
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateRC]
        or      eax, eax
        jz      hlfpua_guest_fxrstor
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
        xrstor  [pXState]
        jmp     hlfpua_guest_done
hlfpua_guest_fxrstor:
        fxrstor [pXState]
hlfpua_guest_done:

hlfpua_finished_switch:
        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        ; Load new CR0 value.
        mov     cr0, ecx                ; load the new cr0 flags.

        ; Return 0 - caller continues execution.
        pop     esi
        pop     ebx
        xor     eax, eax
        leave
        ret

        ;
        ; Action - Generate Guest trap.
        ;
hlfpua_action_4:
hlfpua_guest_trap:
        pop     esi
        pop     ebx
        mov     eax, VINF_EM_RAW_GUEST_TRAP
        leave
        ret
ENDPROC cpumHandleLazyFPUAsm


;;
; Calls a guest trap/interrupt handler directly.
; Assumes a trap stack frame has already been set up on the guest's stack!
;
; @param    pRegFrame   [esp + 4]   Original trap/interrupt context
; @param    selCS       [esp + 8]   Code selector of handler
; @param    pHandler    [esp + 12]  GC virtual address of handler
; @param    eflags      [esp + 16]  Callee's EFLAGS
; @param    selSS       [esp + 20]  Stack selector for handler
; @param    pEsp        [esp + 24]  Stack address for handler
;
; @remark   This call never returns!
;
; VMMRCDECL(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTGCPTR pHandler, uint32_t eflags, uint32_t selSS, RTGCPTR pEsp);
align 16
BEGINPROC_EXPORTED CPUMGCCallGuestTrapHandler
        mov     ebp, esp

        ; Construct the iret stack frame.
        push    dword [ebp + 20]        ; SS
        push    dword [ebp + 24]        ; ESP
        push    dword [ebp + 16]        ; EFLAGS
        push    dword [ebp + 8]         ; CS
        push    dword [ebp + 12]        ; EIP

        ;
        ; enable WP
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ; Restore the CPU context (all except cs, eip, ss, esp & eflags, which are restored or overwritten by iret).
        mov     ebp, [ebp + 4]          ; pRegFrame
        mov     ebx, [ebp + CPUMCTXCORE.ebx]
        mov     ecx, [ebp + CPUMCTXCORE.ecx]
        mov     edx, [ebp + CPUMCTXCORE.edx]
        mov     esi, [ebp + CPUMCTXCORE.esi]
        mov     edi, [ebp + CPUMCTXCORE.edi]

        ;; @todo load segment registers *before* enabling WP.
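        ; (Each TRPM_NP_GP_HANDLER below registers cpumRCHandleNPAndGP - see
        ; CPUMGC.cpp - as the recovery handler for a #NP/#GP raised by the
        ; instruction that follows it, so a bad guest selector or iret frame
        ; is handled instead of killing the hypervisor.)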
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     gs, [ebp + CPUMCTXCORE.gs.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     fs, [ebp + CPUMCTXCORE.fs.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     es, [ebp + CPUMCTXCORE.es.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     ds, [ebp + CPUMCTXCORE.ds.Sel]

        mov     eax, [ebp + CPUMCTXCORE.eax]
        mov     ebp, [ebp + CPUMCTXCORE.ebp]

        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iret
ENDPROC CPUMGCCallGuestTrapHandler


;;
; Performs an iret to V86 code.
; Assumes a trap stack frame has already been set up on the guest's stack!
;
; @param    pRegFrame   Original trap/interrupt context
;
; This function does not return!
;
;VMMRCDECL(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
align 16
BEGINPROC CPUMGCCallV86Code
        push    ebp
        mov     ebp, esp
        mov     ebx, [ebp + 8]          ; pRegFrame

        ; Construct the iret stack frame.
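        ; An iret that returns to virtual-8086 mode pops EIP, CS, EFLAGS, ESP, SS
        ; and then ES, DS, FS, GS, so the full nine-dword frame is built here in
        ; reverse order (GS pushed first, EIP last).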
        push    dword [ebx + CPUMCTXCORE.gs.Sel]
        push    dword [ebx + CPUMCTXCORE.fs.Sel]
        push    dword [ebx + CPUMCTXCORE.ds.Sel]
        push    dword [ebx + CPUMCTXCORE.es.Sel]
        push    dword [ebx + CPUMCTXCORE.ss.Sel]
        push    dword [ebx + CPUMCTXCORE.esp]
        push    dword [ebx + CPUMCTXCORE.eflags]
        push    dword [ebx + CPUMCTXCORE.cs.Sel]
        push    dword [ebx + CPUMCTXCORE.eip]

        ; Invalidate all segment registers.
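        ; (Clearing the 'valid' flag forces the hidden selector state to be
        ; reloaded the next time it is needed, since v8086 and protected mode
        ; interpret selectors very differently.)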
        mov     al, ~CPUMSELREG_FLAGS_VALID
        and     [ebx + CPUMCTXCORE.fs.fFlags], al
        and     [ebx + CPUMCTXCORE.ds.fFlags], al
        and     [ebx + CPUMCTXCORE.es.fFlags], al
        and     [ebx + CPUMCTXCORE.ss.fFlags], al
        and     [ebx + CPUMCTXCORE.gs.fFlags], al
        and     [ebx + CPUMCTXCORE.cs.fFlags], al

        ;
        ; enable WP
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ; Restore the CPU context (all except cs, eip, ss, esp, eflags, ds, es, fs & gs, which are restored or overwritten by iret).
        mov     eax, [ebx + CPUMCTXCORE.eax]
        mov     ecx, [ebx + CPUMCTXCORE.ecx]
        mov     edx, [ebx + CPUMCTXCORE.edx]
        mov     esi, [ebx + CPUMCTXCORE.esi]
        mov     edi, [ebx + CPUMCTXCORE.edi]
        mov     ebp, [ebx + CPUMCTXCORE.ebp]
        mov     ebx, [ebx + CPUMCTXCORE.ebx]

        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iret
ENDPROC CPUMGCCallV86Code


;;
; This is a main entry point for resuming (or starting) guest
; code execution.
;
; We get here directly from VMMSwitcher.asm (jmp at the end
; of VMMSwitcher_HostToGuest).
;
; This call never returns!
;
; @param    edx     Pointer to CPUMCPU structure.
;
align 16
BEGINPROC_EXPORTED CPUMGCResumeGuest
%ifdef VBOX_STRICT
        ; Call CPUM to check sanity.
        push    edx
        mov     edx, IMP(g_VM)
        push    edx
        call    NAME(CPUMRCAssertPreExecutionSanity)
        add     esp, 4
        pop     edx
%endif

        ;
        ; Setup iretd
        ;
        push    dword [edx + CPUMCPU.Guest.ss.Sel]
        push    dword [edx + CPUMCPU.Guest.esp]
        push    dword [edx + CPUMCPU.Guest.eflags]
        push    dword [edx + CPUMCPU.Guest.cs.Sel]
        push    dword [edx + CPUMCPU.Guest.eip]

        ;
        ; Restore registers.
        ;
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES
        mov     es, [edx + CPUMCPU.Guest.es.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS
        mov     fs, [edx + CPUMCPU.Guest.fs.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS
        mov     gs, [edx + CPUMCPU.Guest.gs.Sel]

%ifdef VBOX_WITH_STATISTICS
        ;
        ; Statistics.
        ;
        push    edx
        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalQemuToGC]
        STAM_PROFILE_ADV_STOP edx

        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalInGC]
        STAM_PROFILE_ADV_START edx
        pop     edx
%endif

        ;
        ; enable WP
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ;
        ; Continue restore.
        ;
        mov     esi, [edx + CPUMCPU.Guest.esi]
        mov     edi, [edx + CPUMCPU.Guest.edi]
        mov     ebp, [edx + CPUMCPU.Guest.ebp]
        mov     ebx, [edx + CPUMCPU.Guest.ebx]
        mov     ecx, [edx + CPUMCPU.Guest.ecx]
        mov     eax, [edx + CPUMCPU.Guest.eax]
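        ; DS is pushed while EDX still points to the CPUMCPU structure; EDX
        ; itself is loaded last, and DS is then popped straight off the stack.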
        push    dword [edx + CPUMCPU.Guest.ds.Sel]
        mov     edx, [edx + CPUMCPU.Guest.edx]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS
        pop     ds

        ; restart execution.
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iretd
ENDPROC CPUMGCResumeGuest


;;
; This is a main entry point for resuming (or starting) guest
; code execution for raw V86 mode
;
; We get here directly from VMMSwitcher.asm (jmp at the end
; of VMMSwitcher_HostToGuest).
;
; This call never returns!
;
; @param    edx     Pointer to CPUMCPU structure.
;
align 16
BEGINPROC_EXPORTED CPUMGCResumeGuestV86
%ifdef VBOX_STRICT
        ; Call CPUM to check sanity.
        push    edx
        mov     edx, IMP(g_VM)
        push    edx
        call    NAME(CPUMRCAssertPreExecutionSanity)
        add     esp, 4
        pop     edx
%endif

        ;
        ; Setup iretd
        ;
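        ; Same nine-dword V86 iret frame as in CPUMGCCallV86Code: the segment
        ; registers, SS:ESP, then EFLAGS, CS and EIP.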
        push    dword [edx + CPUMCPU.Guest.gs.Sel]
        push    dword [edx + CPUMCPU.Guest.fs.Sel]
        push    dword [edx + CPUMCPU.Guest.ds.Sel]
        push    dword [edx + CPUMCPU.Guest.es.Sel]

        push    dword [edx + CPUMCPU.Guest.ss.Sel]
        push    dword [edx + CPUMCPU.Guest.esp]

        push    dword [edx + CPUMCPU.Guest.eflags]
        push    dword [edx + CPUMCPU.Guest.cs.Sel]
        push    dword [edx + CPUMCPU.Guest.eip]

        ;
        ; Restore registers.
        ;

%ifdef VBOX_WITH_STATISTICS
        ;
        ; Statistics.
        ;
        push    edx
        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalQemuToGC]
        STAM_PROFILE_ADV_STOP edx

        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalInGC]
        STAM_PROFILE_ADV_START edx
        pop     edx
%endif

        ;
        ; enable WP
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ;
        ; Continue restore.
        ;
        mov     esi, [edx + CPUMCPU.Guest.esi]
        mov     edi, [edx + CPUMCPU.Guest.edi]
        mov     ebp, [edx + CPUMCPU.Guest.ebp]
        mov     ecx, [edx + CPUMCPU.Guest.ecx]
        mov     ebx, [edx + CPUMCPU.Guest.ebx]
        mov     eax, [edx + CPUMCPU.Guest.eax]
        mov     edx, [edx + CPUMCPU.Guest.edx]

        ; restart execution.
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iretd
ENDPROC CPUMGCResumeGuestV86
