VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@ 50653

Last change on this file since 50653 was 49020, checked in by vboxsync, 11 years ago

VMM: FPU cleanup, CPUMAllA.asm is RC only, move it to CPUMRCA.asm and delete CPUMAllA.asm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.3 KB
Line 
1; $Id: CPUMR0A.asm 49020 2013-10-10 08:52:52Z vboxsync $
2;; @file
3; CPUM - Guest Context Assembly Routines.
4;
5
6;
7; Copyright (C) 2006-2013 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/vmm/vm.mac"
23%include "VBox/err.mac"
24%include "VBox/vmm/stam.mac"
25%include "CPUMInternal.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28
29%ifdef IN_RING3
30 %error "The jump table doesn't link on leopard."
31%endif
32
33;*******************************************************************************
34;* Defined Constants And Macros *
35;*******************************************************************************
36;; The offset of the XMM registers in X86FXSTATE.
37; Use define because I'm too lazy to convert the struct.
38%define XMM_OFF_IN_X86FXSTATE 160
39
40
41;*******************************************************************************
42;* External Symbols *
43;*******************************************************************************
44%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
45extern NAME(SUPR0AbsIs64bit)
46extern NAME(SUPR0Abs64bitKernelCS)
47extern NAME(SUPR0Abs64bitKernelSS)
48extern NAME(SUPR0Abs64bitKernelDS)
49extern NAME(SUPR0AbsKernelCS)
50%endif
51
52
53;*******************************************************************************
54;* Global Variables *
55;*******************************************************************************
56%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
57BEGINDATA
58;;
59; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
60; needing to clobber a register. (This trick doesn't quite work for PE btw.
61; but that's not relevant atm.)
62GLOBALNAME g_fCPUMIs64bitHost
63 dd NAME(SUPR0AbsIs64bit)
64%endif
65
66
67BEGINCODE
68
;; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; Only acts when CPUM_USE_FFXSR_LEAKY is set in CPUMCPU.fUseFlags; the final
; fild overwrites the FIP/FOP/FDP registers that would otherwise leak stale
; values across the FXSAVE/FXRSTOR pair on affected CPUs.
;
; This macro ASSUMES CR0.TS is not set!
; @param    xDX     Pointer to the CPUMCPU structure.
; @remarks Trashes xAX!!
; Changes here should also be reflected in CPUMRCA.asm's copy!
%macro CLEANFPU 0
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
    jz      .nothing_to_clean           ; Not a leaky CPU - nothing to do.

    xor     eax, eax
    fnstsw  ax                          ; Get FSW
    test    eax, RT_BIT(7)              ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                        ; while clearing & loading the FPU bits in 'clean_fpu'
    jz      .clean_fpu                  ; ES clear - safe to proceed directly.
    fnclex                              ; Non-waiting clear of pending x87 exceptions.

.clean_fpu:
    ffree   st7                         ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs
                                        ; for the upcoming push (load)
    fild    dword [xDX + CPUMCPU.Guest.fpu] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.

.nothing_to_clean:
%endmacro
93
;; Macro to save and modify CR0 (if necessary) before touching the FPU state
; so as to not cause any FPU exceptions.
;
; If CR0.TS or CR0.EM is set, the original CR0 is stashed in xCX and both bits
; are cleared so the following fxsave/fxrstor cannot raise #NM.  Pair with
; RESTORE_CR0, which undoes the change when xCX is non-zero.
;
; @remarks Uses xCX for backing-up CR0 (if CR0 needs to be modified) otherwise clears xCX.
; @remarks Trashes xAX.
%macro SAVE_CR0_CLEAR_FPU_TRAPS 0
    xor     ecx, ecx                    ; xCX = 0 => nothing for RESTORE_CR0 to do.
    mov     xAX, cr0
    test    eax, X86_CR0_TS | X86_CR0_EM ; Make sure its safe to access the FPU state.
    jz      %%skip_cr0_write            ; Neither bit set - CR0 already FPU-safe.
    mov     xCX, xAX                    ; Save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX
%%skip_cr0_write:
%endmacro
109
;; Macro to restore CR0 from xCX if necessary.
;
; Counterpart of SAVE_CR0_CLEAR_FPU_TRAPS: writes xCX back into CR0 unless it
; is zero.  (A genuinely saved CR0 is never zero here, since it had TS and/or
; EM set when it was stashed.)
;
; @remarks xCX should contain the CR0 value to restore or 0 if no restoration is needed.
%macro RESTORE_CR0 0
    test    ecx, ecx                    ; Idiomatic zero test (shorter encoding than cmp ecx, 0; same ZF).
    je      %%skip_cr0_restore
    mov     cr0, xCX
%%skip_cr0_restore:
%endmacro
119
;;
; Saves the host FPU/XMM state and restores the guest state.
;
; Interrupts are disabled (pushf/cli) around the CR0 fiddling and the
; fxsave/fxrstor pair so nothing can touch the FPU state mid-switch.
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer
; @remarks Trashes xAX and xCX (CR0 backup for RESTORE_CR0).
;
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx                    ; MSC convention: pCPUMCPU in rcx.
 %else
    mov     xDX, rdi                    ; GCC convention: pCPUMCPU in rdi.
 %endif
%else
    mov     xDX, dword [esp + 4]        ; 32-bit: argument on the stack.
%endif
    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    ; Switch the state.
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

    ; Clear CR0 FPU bits to not cause exceptions, uses xCX
    SAVE_CR0_CLEAR_FPU_TRAPS
    ; Do NOT use xCX from this point!

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    ; Hybrid 32-bit kernel on a 64-bit capable host: far-jump into the 64-bit
    ; code segment so the REX-prefixed fxsave/fxrstor forms can be used.
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode (hand-encoded opcode; 16:32 target follows)
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef RT_ARCH_AMD64
    ; Use explicit REX prefix. See @bugref{6398}.
    o64 fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption)
    o64 fxrstor [xDX + CPUMCPU.Guest.fpu]
%else
    fxsave  [xDX + CPUMCPU.Host.fpu]        ; ASSUMES that all VT-x/AMD-V boxes sports fxsave/fxrstor (safe assumption)
    fxrstor [xDX + CPUMCPU.Guest.fpu]
%endif

%ifdef VBOX_WITH_KERNEL_USING_XMM
    ; Restore the non-volatile xmm registers. ASSUMING 64-bit windows
    ; (xmm6-xmm15 are callee-saved in the Microsoft x64 ABI; the fxrstor above
    ; loaded guest values, so reload the host copies from the saved area).
    lea     r11, [xDX + CPUMCPU.Host.fpu + XMM_OFF_IN_X86FXSTATE]
    movdqa  xmm6, [r11 + 060h]          ; 16 bytes per register; xmm6 lives at offset 0x60.
    movdqa  xmm7, [r11 + 070h]
    movdqa  xmm8, [r11 + 080h]
    movdqa  xmm9, [r11 + 090h]
    movdqa  xmm10, [r11 + 0a0h]
    movdqa  xmm11, [r11 + 0b0h]
    movdqa  xmm12, [r11 + 0c0h]
    movdqa  xmm13, [r11 + 0d0h]
    movdqa  xmm14, [r11 + 0e0h]
    movdqa  xmm15, [r11 + 0f0h]
%endif

.done:
    ; Restore CR0 from xCX if it was previously saved.
    RESTORE_CR0
    popf
    xor     eax, eax                    ; Return 0.
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh             ; Zero-extend the 32-bit pCPUMCPU pointer for 64-bit addressing.
    o64 fxsave  [rdx + CPUMCPU.Host.fpu]
    o64 fxrstor [rdx + CPUMCPU.Guest.fpu]
    jmp far [.fpret wrt rip]            ; Far-jump back to the 32-bit kernel code segment.
.fpret: ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0SaveHostRestoreGuestFPUState
199
200
201%ifndef RT_ARCH_AMD64
202%ifdef VBOX_WITH_64_BITS_GUESTS
203%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/XMM state.
;
; Assembled only for 32-bit hosts with 64-bit guest support and no hybrid
; kernel (see the enclosing %ifndef/%ifdef guards), hence the single
; stack-based argument fetch.
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer
; @remarks Trashes xAX and xCX (CR0 backup for RESTORE_CR0).
;
align 16
BEGINPROC cpumR0SaveHostFPUState
    mov     xDX, dword [esp + 4]        ; pCPUMCPU (32-bit only build, so always on the stack).
    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    ; Switch the state.
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

    ; Clear CR0 FPU bits to not cause exceptions, uses xCX
    SAVE_CR0_CLEAR_FPU_TRAPS
    ; Do NOT use xCX from this point!

    fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)

    ; Restore CR0 from xCX if it was saved previously.
    RESTORE_CR0

    popf
    xor     eax, eax                    ; Return 0.
    ret
ENDPROC   cpumR0SaveHostFPUState
232%endif
233%endif
234%endif
235
236
;;
; Saves the guest FPU/XMM state and restores the host state.
;
; No-op (still returns 0) when the guest never used the FPU, i.e. when
; CPUM_USED_FPU is clear in CPUMCPU.fUseFlags.
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer
; @remarks Trashes xAX and xCX (CR0 backup for RESTORE_CR0).
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx                    ; MSC convention: pCPUMCPU in rcx.
 %else
    mov     xDX, rdi                    ; GCC convention: pCPUMCPU in rdi.
 %endif
%else
    mov     xDX, dword [esp + 4]        ; 32-bit: argument on the stack.
%endif

    ; Only restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      short .fpu_not_used

    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    ; Clear CR0 FPU bits to not cause exceptions, uses xCX
    SAVE_CR0_CLEAR_FPU_TRAPS
    ; Do NOT use xCX from this point!

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    ; Hybrid 32-bit kernel on a 64-bit capable host: far-jump into the 64-bit
    ; code segment so the REX-prefixed fxsave/fxrstor forms can be used.
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode (hand-encoded opcode; 16:32 target follows)
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef RT_ARCH_AMD64
    ; Use explicit REX prefix. See @bugref{6398}.
    o64 fxsave  [xDX + CPUMCPU.Guest.fpu]   ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
    o64 fxrstor [xDX + CPUMCPU.Host.fpu]
%else
    fxsave  [xDX + CPUMCPU.Guest.fpu]       ; ASSUMES that all VT-x/AMD-V boxes support fxsave/fxrstor (safe assumption)
    fxrstor [xDX + CPUMCPU.Host.fpu]
%endif

.done:
    ; Restore CR0 from xCX if it was previously saved.
    RESTORE_CR0
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU ; Guest FPU state no longer loaded.
    popf
.fpu_not_used:
    xor     eax, eax                    ; Return 0.
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh             ; Zero-extend the 32-bit pCPUMCPU pointer for 64-bit addressing.
    o64 fxsave  [rdx + CPUMCPU.Guest.fpu]
    o64 fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]            ; Far-jump back to the 32-bit kernel code segment.
.fpret: ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0SaveGuestRestoreHostFPUState
306
307
;;
; Sets the host's FPU/XMM state.
;
; Restores the host FPU state without saving the guest state first; no-op
; (still returns 0) when CPUM_USED_FPU is clear in CPUMCPU.fUseFlags.
;
; @returns 0
; @param pCPUMCPU x86:[esp+4] GCC:rdi MSC:rcx CPUMCPU pointer
; @remarks Trashes xAX and xCX (CR0 backup for RESTORE_CR0).
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx                    ; MSC convention: pCPUMCPU in rcx.
 %else
    mov     xDX, rdi                    ; GCC convention: pCPUMCPU in rdi.
 %endif
%else
    mov     xDX, dword [esp + 4]        ; 32-bit: argument on the stack.
%endif

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      short .fpu_not_used

    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    ; Clear CR0 FPU bits to not cause exceptions, uses xCX
    SAVE_CR0_CLEAR_FPU_TRAPS
    ; Do NOT use xCX from this point!

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    ; Hybrid 32-bit kernel on a 64-bit capable host: far-jump into the 64-bit
    ; code segment so the REX-prefixed fxrstor form can be used.
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode (hand-encoded opcode; 16:32 target follows)
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef RT_ARCH_AMD64
    o64 fxrstor [xDX + CPUMCPU.Host.fpu]    ; Explicit REX prefix, cf. @bugref{6398}.
%else
    fxrstor [xDX + CPUMCPU.Host.fpu]
%endif

.done:
    ; Restore CR0 from xCX if it was previously saved.
    RESTORE_CR0
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU ; Guest FPU state no longer loaded.
    popf
.fpu_not_used:
    xor     eax, eax                    ; Return 0.
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh             ; Zero-extend the 32-bit pCPUMCPU pointer for 64-bit addressing.
    o64 fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]            ; Far-jump back to the 32-bit kernel code segment.
.fpret: ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0RestoreHostFPUState
373
374
375%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
; Reads DR0-DR3 into pa4Regs[0..3] with interrupts disabled so the four
; registers form a consistent snapshot.
;
; @param pa4Regs x86:[esp+4] GCC:rdi MSC:rcx - Array receiving dr0..dr3.
;
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi                    ; GCC passes pa4Regs in rdi; MSC already has it in rcx.
 %endif
%else
    mov     xCX, dword [esp + 4]        ; 32-bit: argument on the stack.
%endif
    pushf                               ; Just to be on the safe side.
    cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    ; On a 64-bit host, switch to the 64-bit code segment so full DRx widths
    ; can be captured.
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode (hand-encoded opcode; 16:32 target follows)
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    ;
    ; Do the job.
    ;
    mov     xAX, dr0
    mov     xDX, dr1
    mov     [xCX], xAX                  ; pa4Regs[0] = dr0
    mov     [xCX + 8 * 1], xDX          ; pa4Regs[1] = dr1
    mov     xAX, dr2
    mov     xDX, dr3
    mov     [xCX + 8 * 2], xAX          ; pa4Regs[2] = dr2
    mov     [xCX + 8 * 3], xDX          ; pa4Regs[3] = dr3
                                        ; NOTE(review): in 32-bit legacy mode these stores write only the low
                                        ; 32 bits of each uint64_t slot - presumably callers pre-initialize or
                                        ; ignore the high halves; verify at the call sites.

.done:
    popf
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0  ; (nested inside the same outer %ifdef - redundant but harmless)
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh             ; Zero-extend the 32-bit pa4Regs pointer for 64-bit addressing.

    mov     rax, dr0
    mov     rdx, dr1
    mov     r8, dr2
    mov     r9, dr3
    mov     [rcx], rax
    mov     [rcx + 8 * 1], rdx
    mov     [rcx + 8 * 2], r8
    mov     [rcx + 8 * 3], r9
    jmp far [.fpret wrt rip]            ; Far-jump back to the 32-bit kernel code segment.
.fpret: ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0SaveDRx
434
435
;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
; Loads DR0-DR3 from pa4Regs[0..3] with interrupts disabled so the four
; registers are updated as one consistent set.
;
; @param pa4Regs x86:[esp+4] GCC:rdi MSC:rcx - Array holding the dr0..dr3 values.
;
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi                    ; GCC passes pa4Regs in rdi; MSC already has it in rcx.
 %endif
%else
    mov     xCX, dword [esp + 4]        ; 32-bit: argument on the stack.
%endif
    pushf                               ; Just to be on the safe side.
    cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    ; On a 64-bit host, switch to the 64-bit code segment so full DRx widths
    ; can be loaded.
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode (hand-encoded opcode; 16:32 target follows)
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    ;
    ; Do the job.
    ;
    mov     xAX, [xCX]                  ; xAX = pa4Regs[0]
    mov     xDX, [xCX + 8 * 1]          ; xDX = pa4Regs[1]
    mov     dr0, xAX
    mov     dr1, xDX
    mov     xAX, [xCX + 8 * 2]          ; xAX = pa4Regs[2]
    mov     xDX, [xCX + 8 * 3]          ; xDX = pa4Regs[3]
    mov     dr2, xAX
    mov     dr3, xDX

.done:
    popf
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0  ; (nested inside the same outer %ifdef - redundant but harmless)
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh             ; Zero-extend the 32-bit pa4Regs pointer for 64-bit addressing.

    mov     rax, [rcx]
    mov     rdx, [rcx + 8 * 1]
    mov     r8, [rcx + 8 * 2]
    mov     r9, [rcx + 8 * 3]
    mov     dr0, rax
    mov     dr1, rdx
    mov     dr2, r8
    mov     dr3, r9
    jmp far [.fpret wrt rip]            ; Far-jump back to the 32-bit kernel code segment.
.fpret: ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC   cpumR0LoadDRx
494
495%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
496
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
Contact · Privacy policy · Terms of Use