
source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0UnusedA.asm@43667

Last change on this file since 43667 was 37955, checked in by vboxsync, 13 years ago

Moved VBox/x86.h/mac to iprt/x86.h/mac.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 10.9 KB
; $Id: CPUMR0UnusedA.asm 37955 2011-07-14 12:23:02Z vboxsync $
;; @file
; CPUM - Guest Context Assembly Routines.
;

;
; Copyright (C) 2006-2007 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "VBox/vmm/stam.mac"
%include "CPUMInternal.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"

%ifdef IN_RING3
 %error "The jump table doesn't link on leopard."
%endif


;*******************************************************************************
;*      External Symbols                                                       *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
extern NAME(g_fCPUMIs64bitHost)
%endif


;;
; Restores the guest's FPU/XMM state
;
; @param    pCtx    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
;
; @remarks  Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
;
align 16
BEGINPROC cpumR0LoadFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxrstor [xDX + CPUMCTX.fpu]
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxrstor [rdx + CPUMCTX.fpu]
    jmp     far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadFPU
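
;
; Illustrative sketch of the calling convention documented above: how a
; hypothetical ring-0 caller could hand pCtx to cpumR0LoadFPU (MSC: rcx,
; GCC: rdi, x86: [esp+4]).  The symbol g_pExampleCtx exists only for this
; sketch; the block is disabled with %if 0 and is not assembled.
;
%if 0
 %ifdef RT_ARCH_AMD64
  %ifdef RT_OS_WINDOWS
    mov     rcx, [g_pExampleCtx]        ; 1st argument in rcx (Microsoft x64 ABI)
  %else
    mov     rdi, [g_pExampleCtx]        ; 1st argument in rdi (SysV AMD64 ABI)
  %endif
    call    NAME(cpumR0LoadFPU)
 %else
    push    dword [g_pExampleCtx]       ; 1st argument sits at [esp+4] inside the callee
    call    NAME(cpumR0LoadFPU)
    add     esp, 4                      ; caller cleans up (cdecl)
 %endif
%endif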


;;
; Saves the guest's FPU/XMM state
;
; @param    pCtx    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
;
; @remarks  Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
;
align 16
BEGINPROC cpumR0SaveFPU
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    fxsave  [xDX + CPUMCTX.fpu]
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCTX.fpu]
    jmp     far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveFPU
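
;
; Note on the hybrid 32-bit kernel paths in this file: the "db 0xea" / "dd"
; pair hand-assembles a direct far jump.  In 32-bit code 0xEA is the opcode
; for JMP ptr16:32, i.e. a 32-bit target offset followed by a 16-bit code
; segment selector.  Jumping through SUPR0Abs64bitKernelCS switches execution
; to the 64-bit kernel code segment, presumably so that fxsave/fxrstor cover
; the full 16-register XMM set and xmm8-xmm15 become addressable; the ".fpret"
; far pointer jumps back to the 32-bit segment (SUPR0AbsKernelCS).  The
; selector is an absolute symbol resolved at load time, which is presumably
; why the jump is emitted as data.  A hedged byte-for-byte re-spelling
; (disabled, not assembled):
;
%if 0
    db      0xea                        ; JMP ptr16:32 opcode
    dd      .sixtyfourbit_mode          ; 32-bit offset within the target segment
    dw      NAME(SUPR0Abs64bitKernelCS) ; 16-bit selector (the original emits a dd,
                                        ; so the high word there is unused padding)
%endif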


;;
; Restores the guest's XMM state
;
; @param    pCtx    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
;
; @remarks  Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
;
align 16
BEGINPROC cpumR0LoadXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    movdqa  xmm2, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    movdqa  xmm3, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    movdqa  xmm4, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    movdqa  xmm5, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    movdqa  xmm6, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    movdqa  xmm7, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

%ifdef RT_ARCH_AMD64
    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .done

    movdqa  xmm8,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    movdqa  xmm9,  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    movdqa  xmm10, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    movdqa  xmm11, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    movdqa  xmm12, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    movdqa  xmm13, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
%endif
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh

    movdqa  xmm0, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    movdqa  xmm1, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    movdqa  xmm2, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    movdqa  xmm3, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    movdqa  xmm4, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    movdqa  xmm5, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    movdqa  xmm6, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    movdqa  xmm7, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]

    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .sixtyfourbit_done

    movdqa  xmm8,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    movdqa  xmm9,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
.sixtyfourbit_done:
    jmp     far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadXMM
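
;
; Note on the EFER.LMA test above: xmm8-xmm15 are only addressable in 64-bit
; (long) mode, which is presumably why the high eight registers are only
; transferred when the guest's EFER indicates long mode is active
; (MSR_K6_EFER_LMA); a 32-bit guest never touches xmm8-xmm15, so the routine
; stops after xmm0-xmm7.  The same check is used by cpumR0SaveXMM below.
;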


;;
; Saves the guest's XMM state
;
; @param    pCtx    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
;
; @remarks  Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
;
align 16
BEGINPROC cpumR0SaveXMM
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

%ifdef RT_ARCH_AMD64
    test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .done

    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8],  xmm8
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9],  xmm9
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

%endif
.done:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh

    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7

    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
    jz      .sixtyfourbit_done

    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8],  xmm8
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9],  xmm9
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15

.sixtyfourbit_done:
    jmp     far [.fpret wrt rip]
.fpret:                                 ; 16:32 pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveXMM


;;
; Sets the FPU control word, clearing any pending FPU exceptions first.
;
; @param    u16FCW      x86:[esp+4] GCC:rdi MSC:rcx     New FPU control word
align 16
BEGINPROC cpumR0SetFCW
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xAX, rcx
 %else
    mov     xAX, rdi
 %endif
%else
    mov     xAX, dword [esp + 4]
%endif
    fnclex
    push    xAX
    fldcw   [xSP]
    pop     xAX
    ret
ENDPROC cpumR0SetFCW
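
;
; Illustrative sketch: fldcw only takes a 16-bit memory operand, which is why
; cpumR0SetFCW above spills the new control word to the stack with push/pop.
; An equivalent spelling with an explicit scratch slot could look like this
; (disabled, not assembled):
;
%if 0
    sub     xSP, 8                      ; scratch slot, keeps the stack pointer aligned
    mov     [xSP], ax                   ; low 16 bits of xAX hold the new FCW
    fldcw   [xSP]                       ; load the FPU control word from memory
    add     xSP, 8
%endif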


;;
; Gets the FPU control word.
;
align 16
BEGINPROC cpumR0GetFCW
    fnstcw  [xSP - 8]
    mov     ax, word [xSP - 8]
    ret
ENDPROC cpumR0GetFCW


;;
; Sets the MXCSR.
;
; @param    u32MXCSR    x86:[esp+4] GCC:rdi MSC:rcx     New MXCSR
align 16
BEGINPROC cpumR0SetMXCSR
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xAX, rcx
 %else
    mov     xAX, rdi
 %endif
%else
    mov     xAX, dword [esp + 4]
%endif
    push    xAX
    ldmxcsr [xSP]
    pop     xAX
    ret
ENDPROC cpumR0SetMXCSR


;;
; Gets the MXCSR.
;
align 16
BEGINPROC cpumR0GetMXCSR
    stmxcsr [xSP - 8]
    mov     eax, dword [xSP - 8]
    ret
ENDPROC cpumR0GetMXCSR
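
;
; Note on the two getters above: they use scratch space just below the stack
; pointer.  Only the SysV AMD64 ABI formally guarantees a red zone there, so a
; variant that reserves the slot explicitly might look like this (illustrative
; sketch, disabled, not assembled):
;
%if 0
    sub     xSP, 8                      ; reserve an aligned scratch slot
    stmxcsr [xSP]                       ; store MXCSR to memory (m32 operand)
    mov     eax, dword [xSP]            ; return the value in eax
    add     xSP, 8
    ret
%endif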