Timestamp:
    May 19, 2016 7:12:56 PM
Location:
    trunk
Files:
    1 deleted
    12 edited
- include/VBox/vmm/cpum.h (modified) (2 diffs)
- src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (modified) (5 diffs)
- src/VBox/VMM/VMMR0/CPUMR0.cpp (modified) (5 diffs)
- src/VBox/VMM/VMMR0/CPUMR0A.asm (modified) (18 diffs)
- src/VBox/VMM/VMMR0/CPUMR0UnusedA.asm (deleted)
- src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (4 diffs)
- src/VBox/VMM/VMMR0/HMVMXR0.cpp (modified) (7 diffs)
- src/VBox/VMM/VMMRC/CPUMRCA.asm (modified) (3 diffs)
- src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac (modified) (4 diffs)
- src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac (modified) (7 diffs)
- src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac (modified) (3 diffs)
- src/VBox/VMM/include/CPUMInternal.h (modified) (4 diffs)
- src/VBox/VMM/include/CPUMInternal.mac (modified) (1 diff)
Legend:
    unchanged lines carry no prefix
    + added
    - removed
    … unchanged lines omitted
trunk/include/VBox/vmm/cpum.h
(r60762 → r61058)

 VMMDECL(bool)       CPUMIsHostUsingSysCall(PVM pVM);
 VMMDECL(bool)       CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
+VMMDECL(bool)       CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
 VMMDECL(bool)       CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
 VMMDECL(bool)       CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu);
…
 VMMR0_INT_DECL(int)  CPUMR0ModuleTerm(void);
 VMMR0_INT_DECL(int)  CPUMR0InitVM(PVM pVM);
-VMMR0_INT_DECL(int)  CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
-VMMR0_INT_DECL(int)  CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
-VMMR0_INT_DECL(int)  CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+VMMR0_INT_DECL(int)  CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu);
+VMMR0_INT_DECL(int)  CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu);
+VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu);
 VMMR0_INT_DECL(int)  CPUMR0SaveHostDebugState(PVM pVM, PVMCPU pVCpu);
 VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6);
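The header change drops the PCPUMCTX argument from the lazy-FPU entry points and replaces CPUMR0SaveGuestFPU with CPUMR0FpuStateMaybeSaveGuestAndRestoreHost, which reports whether guest state was actually written back. A minimal sketch of the intended ring-0 call pattern, using only names from this changeset (the wrapping exit-path function is hypothetical, and this only compiles inside the VMM with its headers):

    /* Hypothetical caller illustrating the new API pair; it mirrors what
       HMSVMR0.cpp and HMVMXR0.cpp do elsewhere in this changeset. */
    static void hmExitPathSketch(PVM pVM, PVMCPU pVCpu)
    {
        /* Entry: lazily load the guest FPU/SSE/AVX state; the PCPUMCTX
           argument is gone because the function reads pVCpu->cpum.s.Guest. */
        if (!CPUMIsGuestFPUStateActive(pVCpu))
            CPUMR0LoadGuestFPU(pVM, pVCpu);

        /* ... execute guest code ... */

        /* Exit: saves the guest state if it was loaded and restores the host
           state if it was saved.  Returns true when guest state was saved,
           i.e. when guest CR0 handling must be marked dirty. */
        if (CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu))
        {
            /* e.g. HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); */
        }
    }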
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(r60821 → r61058)

         != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
     {
-        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
+        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
         {
             /*
-             * We haven't saved the host FPU state yet, so TS and MT are both set
+             * We haven't loaded the guest FPU state yet, so TS and MT are both set
              * and EM should be reflecting the guest EM (it always does this).
              */
…
         {
             /*
-             * Already saved the state, so we're just mirroring
+             * Already loaded the guest FPU state, so we're just mirroring
              * the guest flags.
              */
…
 {
 #if defined(IN_RING0) || defined(IN_RC)
-    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU)
+    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
     {
         if (pVCpu->cpum.s.Guest.fXStateMask != 0)
…
 /**
  * Checks if we activated the FPU/XMM state of the guest OS.
+ *
  * @returns true if we did.
  * @returns false if not.
…
 VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
 {
-    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
+    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
+}
+
+
+/**
+ * Checks if we saved the FPU/XMM state of the host OS.
+ *
+ * @returns true / false.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ */
+VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
+{
+    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
 }
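The reworded comments track the flag split: what matters for CR0.TS/EM/MP shadowing is whether the guest FPU state is currently on the CPU, not whether the host state has been saved. A rough C sketch of that mirroring decision, assuming the field names used in the diff (simplified; the real CR0-write logic in CPUMAllRegs.cpp handles more cases):

    /* Sketch: choose the hardware CR0 FPU bits when the guest writes CR0
       (illustrative only, not the actual VBox function). */
    static uint32_t cpumMirrorCr0FpuBitsSketch(PVMCPU pVCpu, uint32_t uNewGuestCr0)
    {
        uint32_t uHwCr0 = uNewGuestCr0;
        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
        {
            /* Guest FPU state not loaded yet: keep TS and MP set so the first
               FPU instruction raises #NM and the state can be loaded lazily;
               EM simply reflects the guest's EM bit. */
            uHwCr0 |= X86_CR0_TS | X86_CR0_MP;
        }
        /* else: guest state is live, so the hardware bits just mirror the
           guest's TS/EM/MP. */
        return uHwCr0;
    }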
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
(r58123 → r61058)

  * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu)
 {
     Assert(pVM->cpum.s.HostFeatures.fFxSaveRstor);
…
     if (CPUMIsGuestFPUStateActive(pVCpu))
     {
-        Assert(   ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS))
-               || ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM)));
+        Assert(   ((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS))
+               || ((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM)));
         return VINF_EM_RAW_GUEST_TRAP;
     }
…
      */

-    switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
+    switch (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
     {
         case X86_CR0_MP | X86_CR0_TS:
…
     }

-    return CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
+    return CPUMR0LoadGuestFPU(pVM, pVCpu);
 }


 /**
- * Saves the host-FPU/XMM state and loads the guest-FPU state into the CPU.
- *
- * @returns VBox status code.
+ * Saves the host-FPU/XMM state (if necessary) and (always) loads the guest-FPU
+ * state into the CPU.
+ *
+ * @returns VINF_SUCCESS (for CPUMR0Trap07Handler).
  *
  * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST));
+    Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
+
 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
-    if (CPUMIsGuestInLongModeEx(pCtx))
-    {
-        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
-
-        /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
-        cpumR0SaveHostFPUState(&pVCpu->cpum.s);
+    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
+    {
+        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
+
+        /* Save the host state if necessary. */
+        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST))
+            cpumR0SaveHostFPUState(&pVCpu->cpum.s);

         /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
         pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
+
+        Assert(   (pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_HOST | CPUM_USED_FPU_SINCE_REM))
+               == (CPUM_USED_FPU_HOST | CPUM_USED_FPU_SINCE_REM));
     }
     else
 #endif
     {
-        NOREF(pCtx);
-        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
-        /** @todo Move the FFXR handling down into
-         *        cpumR0SaveHostRestoreGuestFPUState to optimize the
-         *        VBOX_WITH_KERNEL_USING_XMM handling. */
-        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
-        uint64_t uHostEfer    = 0;
-        bool     fRestoreEfer = false;
-        if (pVM->cpum.s.HostFeatures.fLeakyFxSR)
-        {
+        if (!pVM->cpum.s.HostFeatures.fLeakyFxSR)
+        {
+            Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
+            cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);
+        }
+        else
+        {
+            Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE) || (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST));
             /** @todo r=ramshankar: Can't we used a cached value here
              *        instead of reading the MSR? host EFER doesn't usually
              *        change. */
-            uHostEfer = ASMRdMsr(MSR_K6_EFER);
-            if (uHostEfer & MSR_K6_EFER_FFXSR)
+            uint64_t uHostEfer = ASMRdMsr(MSR_K6_EFER);
+            if (!(uHostEfer & MSR_K6_EFER_FFXSR))
+                cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);
+            else
             {
+                RTCCUINTREG const uSavedFlags = ASMIntDisableFlags();
+                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
                 ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
-                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
-                fRestoreEfer = true;
+                cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);
+                ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_FFXSR);
+                ASMSetFlags(uSavedFlags);
             }
         }
-
-        /* Do the job and record that we've switched FPU state. */
-        cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);
-
-        /* Restore EFER. */
-        if (fRestoreEfer)
-            ASMWrMsr(MSR_K6_EFER, uHostEfer);
-    }
-
-    Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
+        Assert(   (pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST | CPUM_USED_FPU_SINCE_REM))
+               == (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST | CPUM_USED_FPU_SINCE_REM));
+    }
     return VINF_SUCCESS;
 }


 /**
- * Save guest FPU/XMM state
- *
- * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
+ * Saves the guest FPU/XMM state if needed, restores the host FPU/XMM state as
+ * needed.
+ *
+ * @returns true if we saved the guest state.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest CPU context.
- */
-VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    Assert(pVM->cpum.s.HostFeatures.fFxSaveRstor);
+ */
+VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu)
+{
+    bool fSavedGuest;
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.HostFeatures.fFxSaveRstor);
     Assert(ASMGetCR4() & X86_CR4_OSFXSR);
-    AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);
-    NOREF(pVM); NOREF(pCtx);
+    if (pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST))
+    {
+        fSavedGuest = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
-    if (CPUMIsGuestInLongModeEx(pCtx))
-    {
-        if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
-        {
-            HMR0SaveFPUState(pVM, pVCpu, pCtx);
+        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
+        {
+            if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
+            {
+                Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
+                HMR0SaveFPUState(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
+            }
+            else
+                pVCpu->cpum.s.fUseFlags &= ~CPUM_SYNC_FPU_STATE;
             cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
         }
-        /* else nothing to do; we didn't perform a world switch */
+        else
+#endif
+        {
+            if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE))
+                cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
+            else
+            {
+                /* Temporarily clear MSR_K6_EFER_FFXSR or else we'll be unable to
+                   save/restore the XMM state with fxsave/fxrstor. */
+                uint64_t uHostEfer = ASMRdMsr(MSR_K6_EFER);
+                if (uHostEfer & MSR_K6_EFER_FFXSR)
+                {
+                    RTCCUINTREG const uSavedFlags = ASMIntDisableFlags();
+                    ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
+                    cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
+                    ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_FFXSR);
+                    ASMSetFlags(uSavedFlags);
+                }
+                else
+                    cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
+                pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_MANUAL_XMM_RESTORE;
+            }
+        }
     }
     else
-#endif
-    {
-#ifdef VBOX_WITH_KERNEL_USING_XMM
-        /*
-         * We've already saved the XMM registers in the assembly wrapper, so
-         * we have to save them before saving the entire FPU state and put them
-         * back afterwards.
-         */
-        /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
-         *        I'm not able to test such an optimization tonight.
-         *        We could just all this in assembly. */
-        uint128_t aGuestXmmRegs[16];
-        memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.CTX_SUFF(pXState)->x87.aXMM[0], sizeof(aGuestXmmRegs));
-#endif
-
-        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
-        uint64_t uHostEfer    = 0;
-        bool     fRestoreEfer = false;
-        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
-        {
-            uHostEfer = ASMRdMsr(MSR_K6_EFER);
-            if (uHostEfer & MSR_K6_EFER_FFXSR)
-            {
-                ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
-                fRestoreEfer = true;
-            }
-        }
-
-        cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
-
-        /* Restore EFER MSR */
-        if (fRestoreEfer)
-            ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_FFXSR);
-
-#ifdef VBOX_WITH_KERNEL_USING_XMM
-        memcpy(&pVCpu->cpum.s.Guest.CTX_SUFF(pXState)->x87.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
-#endif
-    }
-
-    pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_USED_MANUAL_XMM_RESTORE);
-    return VINF_SUCCESS;
+        fSavedGuest = false;
+    Assert(!(  pVCpu->cpum.s.fUseFlags
+             & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST | CPUM_SYNC_FPU_STATE | CPUM_USED_MANUAL_XMM_RESTORE)));
+    return fSavedGuest;
 }
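Both rewritten paths share the same workaround: on hosts with a "leaky" FXSAVE/FXRSTOR (AMD's FFXSR feature), EFER.FFXSR must be cleared around the fxsave/fxrstor pair or the XMM registers are silently skipped, and interrupts must stay disabled while the MSR is toggled. Extracted as a free-standing, hedged sketch (the helper name and the function-pointer shape are invented for illustration; the MSR and assembly helpers are the ones used in the diff):

    /* Sketch of the EFER.FFXSR bracket used in both new code paths.
       pfnDoFxSaveRestore stands in for cpumR0SaveHostRestoreGuestFPUState or
       cpumR0SaveGuestRestoreHostFPUState; it is not a real VBox function. */
    static void withFfxsrClearedSketch(PVMCPU pVCpu, void (*pfnDoFxSaveRestore)(PCPUMCPU))
    {
        uint64_t const uHostEfer = ASMRdMsr(MSR_K6_EFER);
        if (uHostEfer & MSR_K6_EFER_FFXSR)
        {
            /* No interrupts between the two MSR writes, or an interrupt
               handler could run with FFXSR unexpectedly cleared. */
            RTCCUINTREG const uSavedFlags = ASMIntDisableFlags();
            ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
            pfnDoFxSaveRestore(&pVCpu->cpum.s);
            ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_FFXSR);
            ASMSetFlags(uSavedFlags);
        }
        else
            pfnDoFxSaveRestore(&pVCpu->cpum.s);
    }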
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm
(r61031 → r61058)

 ;
-; Copyright (C) 2006-2015 Oracle Corporation
+; Copyright (C) 2006-2016 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
…
 ;
 
+
 ;*******************************************************************************
 ;*   Header Files                                                              *
 ;*******************************************************************************
+%define RT_ASM_WITH_SEH64
+%include "iprt/asmdefs.mac"
 %include "VBox/asmdefs.mac"
 %include "VBox/vmm/vm.mac"
…
 %include "VBox/vmm/cpum.mac"
 
-%ifdef IN_RING3
- %error "The jump table doesn't link on leopard."
-%endif
 
 ;*******************************************************************************
…
 ; @uses     rax, rdx
 ; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
-; @param    pXState   Define for the regsiter containing the extended state pointer.
+; @param    pXState   Define for the register containing the extended state pointer.
 ;
 %macro CPUMR0_SAVE_HOST 0
…
 ; @uses     rax, rdx
 ; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
-; @param    pXState   Define for the regsiter containing the extended state pointer.
+; @param    pXState   Define for the register containing the extended state pointer.
 ;
 %macro CPUMR0_LOAD_HOST 0
…
 ; @uses     rax, rdx
 ; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
-; @param    pXState   Define for the regsiter containing the extended state pointer.
+; @param    pXState   Define for the register containing the extended state pointer.
 ;
 %macro CPUMR0_SAVE_GUEST 0
…
 ; @uses     rax, rdx
 ; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
-; @param    pXState   Define for the regsiter containing the extended state pointer.
+; @param    pXState   Define for the register containing the extended state pointer.
 ;
 %macro CPUMR0_LOAD_GUEST 0
…
 ; Saves the host FPU/SSE/AVX state and restores the guest FPU/SSE/AVX state.
 ;
-; @returns  0
 ; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
 ;
 align 16
 BEGINPROC cpumR0SaveHostRestoreGuestFPUState
+        push    xBP
+        SEH64_PUSH_xBP
+        mov     xBP, xSP
+        SEH64_SET_FRAME_xBP 0
+SEH64_END_PROLOGUE
+
         ;
         ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
…
 %define pXState r10
 %else
-        push    ebp
-        mov     ebp, esp
         push    ebx
         push    esi
…
         cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
 
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        movaps  xmm0, xmm0              ; Make 100% sure it's used before we save it or mess with CR0/XCR0.
+%endif
         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
 
+        ;
+        ; Save the host state.
+        ;
+        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_HOST
+        jnz     .already_saved_host
         CPUMR0_SAVE_HOST
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        jmp     .load_guest
+%endif
+.already_saved_host:
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        ; If we didn't save the host state, we must save the non-volatile XMM registers.
+        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
+
+        ;
+        ; Load the guest state.
+        ;
+.load_guest:
+%endif
         CPUMR0_LOAD_GUEST
 
…
 %endif
 
+        ;; @todo Save CR0 + XCR0 bits related to FPU, SSE and AVX*, leaving these register sets accessible to IEM.
         RESTORE_CR0 xCX
-        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
+        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM | CPUM_USED_FPU_HOST)
         popf
 
 %ifdef RT_ARCH_X86
         pop     esi
         pop     ebx
+%endif
         leave
-%endif
-        xor     eax, eax
         ret
 ENDPROC   cpumR0SaveHostRestoreGuestFPUState
 
 
-%ifndef RT_ARCH_AMD64
-%ifdef VBOX_WITH_64_BITS_GUESTS
 ;;
 ; Saves the host FPU/SSE/AVX state.
…
 align 16
 BEGINPROC cpumR0SaveHostFPUState
-        ;
-        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
-        ;
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-        mov     r11, rcx
- %else
-        mov     r11, rdi
- %endif
- %define pCpumCpu r11
- %define pXState  r10
-%else
-        push    ebp
-        mov     ebp, esp
-        push    ebx
-        push    esi
-        mov     ebx, dword [ebp + 8]
- %define pCpumCpu ebx
- %define pXState  esi
-%endif
-
-        pushf                           ; The darwin kernel can get upset or upset things if an
-        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
-        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
-
-        CPUMR0_SAVE_HOST
-
-        RESTORE_CR0 xCX
-        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
-        popf
-
-%ifdef RT_ARCH_X86
-        pop     esi
-        pop     ebx
-        leave
-%endif
-        xor     eax, eax
-        ret
-%undef pCpumCpu
-%undef pXState
-ENDPROC   cpumR0SaveHostFPUState
-%endif
-%endif
-
-
-;;
-; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state.
-;
-; @returns  VINF_SUCCESS (0) in eax.
-; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
-;
-align 16
-BEGINPROC cpumR0SaveGuestRestoreHostFPUState
+        push    xBP
+        SEH64_PUSH_xBP
+        mov     xBP, xSP
+        SEH64_SET_FRAME_xBP 0
+SEH64_END_PROLOGUE
+
         ;
         ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
…
 %define pXState r10
 %else
-        push    ebp
-        mov     ebp, esp
         push    ebx
         push    esi
…
 %endif
 
-        ;
-        ; Only restore FPU if guest has used it.
-        ;
-        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
-        jz      .fpu_not_used
-
         pushf                           ; The darwin kernel can get upset or upset things if an
         cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        movaps  xmm0, xmm0              ; Make 100% sure it's used before we save it or mess with CR0/XCR0.
+%endif
         SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
 
-        CPUMR0_SAVE_GUEST
-        CPUMR0_LOAD_HOST
+        CPUMR0_SAVE_HOST
+        ;; @todo Save CR0 + XCR0 bits related to FPU, SSE and AVX*, leaving these register sets accessible to IEM.
 
         RESTORE_CR0 xCX
-        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
+        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_HOST | CPUM_USED_FPU_SINCE_REM) ; Latter is not necessarily true, but normally yes.
         popf
 
-.fpu_not_used:
 %ifdef RT_ARCH_X86
         pop     esi
         pop     ebx
+%endif
         leave
-%endif
-        xor     eax, eax
         ret
 %undef pCpumCpu
 %undef pXState
-ENDPROC   cpumR0SaveGuestRestoreHostFPUState
-
-
-;;
-; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host.
-;
-; @returns  0
+ENDPROC   cpumR0SaveHostFPUState
+
+
+;;
+; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state.
+;
 ; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
 ;
 align 16
-BEGINPROC cpumR0RestoreHostFPUState
+BEGINPROC cpumR0SaveGuestRestoreHostFPUState
+        push    xBP
+        SEH64_PUSH_xBP
+        mov     xBP, xSP
+        SEH64_SET_FRAME_xBP 0
+SEH64_END_PROLOGUE
+
         ;
         ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
         ;
…
 %define pXState r10
 %else
+        push    ebx
+        push    esi
+        mov     ebx, dword [ebp + 8]
+ %define pCpumCpu ebx
+ %define pXState  esi
+%endif
+        pushf                           ; The darwin kernel can get upset or upset things if an
+        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
+        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
+
+
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        ;
+        ; Copy non-volatile XMM registers to the host state so we can use
+        ; them while saving the guest state (we've gotta do this anyway).
+        ;
+        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
+        movdqa  [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
+%endif
+
+        ;
+        ; Save the guest state if necessary.
+        ;
+        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
+        jz      .load_only_host
+
+%ifdef VBOX_WITH_KERNEL_USING_XMM
+        ; Load the guest XMM register values we already saved in HMR0VMXStartVMWrapXMM.
+        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
+        movdqa  xmm0,  [pXState + XMM_OFF_IN_X86FXSTATE + 000h]
+        movdqa  xmm1,  [pXState + XMM_OFF_IN_X86FXSTATE + 010h]
+        movdqa  xmm2,  [pXState + XMM_OFF_IN_X86FXSTATE + 020h]
+        movdqa  xmm3,  [pXState + XMM_OFF_IN_X86FXSTATE + 030h]
+        movdqa  xmm4,  [pXState + XMM_OFF_IN_X86FXSTATE + 040h]
+        movdqa  xmm5,  [pXState + XMM_OFF_IN_X86FXSTATE + 050h]
+        movdqa  xmm6,  [pXState + XMM_OFF_IN_X86FXSTATE + 060h]
+        movdqa  xmm7,  [pXState + XMM_OFF_IN_X86FXSTATE + 070h]
+        movdqa  xmm8,  [pXState + XMM_OFF_IN_X86FXSTATE + 080h]
+        movdqa  xmm9,  [pXState + XMM_OFF_IN_X86FXSTATE + 090h]
+        movdqa  xmm10, [pXState + XMM_OFF_IN_X86FXSTATE + 0a0h]
+        movdqa  xmm11, [pXState + XMM_OFF_IN_X86FXSTATE + 0b0h]
+        movdqa  xmm12, [pXState + XMM_OFF_IN_X86FXSTATE + 0c0h]
+        movdqa  xmm13, [pXState + XMM_OFF_IN_X86FXSTATE + 0d0h]
+        movdqa  xmm14, [pXState + XMM_OFF_IN_X86FXSTATE + 0e0h]
+        movdqa  xmm15, [pXState + XMM_OFF_IN_X86FXSTATE + 0f0h]
+%endif
+        CPUMR0_SAVE_GUEST
+
+        ;
+        ; Load the host state.
+        ;
+.load_only_host:
+        CPUMR0_LOAD_HOST
+
+        ;; @todo Restore CR0 + XCR0 bits related to FPU, SSE and AVX* (for IEM).
+        RESTORE_CR0 xCX
+        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)
+
+        popf
+%ifdef RT_ARCH_X86
+        pop     esi
+        pop     ebx
+%endif
+        leave
+        ret
+%undef pCpumCpu
+%undef pXState
+ENDPROC   cpumR0SaveGuestRestoreHostFPUState
+
+
+%if ARCH_BITS == 32
+ %ifdef VBOX_WITH_64_BITS_GUESTS
+;;
+; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host.
+;
+; @param    pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx     CPUMCPU pointer
+;
+align 16
+BEGINPROC cpumR0RestoreHostFPUState
+        ;
+        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
+        ;
         push    ebp
         mov     ebp, esp
…
         push    esi
         mov     ebx, dword [ebp + 8]
- %define pCpumCpu ebx
- %define pXState  esi
-
-        ;
-        ; Restore FPU if guest has used it.
-        ;
-        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
-        jz      .fpu_not_used
-
+ %define pCpumCpu ebx
+ %define pXState  esi
+
+        ;
+        ; Restore host CPU state.
+        ;
         pushf                           ; The darwin kernel can get upset or upset things if an
         cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
…
 
         RESTORE_CR0 xCX
-        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
+        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_HOST
         popf
 
-.fpu_not_used:
-%ifdef RT_ARCH_X86
         pop     esi
         pop     ebx
         leave
-%endif
-        xor     eax, eax
         ret
 %undef pCpumCPu
 %undef pXState
 ENDPROC   cpumR0RestoreHostFPUState
+ %endif ; VBOX_WITH_64_BITS_GUESTS
+%endif  ; ARCH_BITS == 32
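Taken together, the reworked procedures implement a small state machine over two flags: CPUM_USED_FPU_HOST ("host state is stashed away and must be restored before leaving ring-0") and CPUM_USED_FPU_GUEST ("guest state is live on the CPU and must be saved before anything clobbers it"). A hedged C model of the transitions (illustrative only; the real transitions are in the assembly above):

    /* C model of the fUseFlags transitions in CPUMR0A.asm after this change. */
    static void modelSaveHostRestoreGuest(uint32_t *pfUseFlags)
    {
        /* cpumR0SaveHostRestoreGuestFPUState: host state is saved at most once. */
        if (!(*pfUseFlags & CPUM_USED_FPU_HOST))
        {
            /* fxsave/xsave the host state here */
        }
        /* fxrstor/xrstor the guest state here */
        *pfUseFlags |= CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM | CPUM_USED_FPU_HOST;
    }

    static void modelSaveGuestRestoreHost(uint32_t *pfUseFlags)
    {
        /* cpumR0SaveGuestRestoreHostFPUState: guest state is saved only if live. */
        if (*pfUseFlags & CPUM_USED_FPU_GUEST)
        {
            /* fxsave/xsave the guest state here */
        }
        /* fxrstor/xrstor the host state here */
        *pfUseFlags &= ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST);
    }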
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r60894 → r61058)

 
     /* Restore host FPU state if necessary and resync on next R0 reentry .*/
-    if (CPUMIsGuestFPUStateActive(pVCpu))
-    {
-        CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
-        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+    if (CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu))
         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
-    }
 
     /*
…
     HM_DISABLE_PREEMPT();
 
-    /* Restore host FPU state if necessary and resync on next R0 reentry .*/
-    if (CPUMIsGuestFPUStateActive(pVCpu))
-        CPUMR0SaveGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
+    /* Restore host FPU state if necessary and resync on next R0 reentry. */
+    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
 
     /* Restore host debug registers if necessary and resync on next R0 reentry. */
…
         && !CPUMIsGuestFPUStateActive(pVCpu))
     {
-        CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
+        CPUMR0LoadGuestFPU(pVM, pVCpu);
         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     }
…
         Assert(!pSvmTransient->fWasGuestFPUStateActive);
 #endif
-        rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
+        rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu);
         Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
     }
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r61013 → r61058)

 
     /* Restore host FPU state if necessary and resync on next R0 reentry .*/
-    if (CPUMIsGuestFPUStateActive(pVCpu))
-    {
-        /* We shouldn't reload CR0 without saving it first. */
-        if (!fSaveGuestState)
-        {
+    if (CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu))
+    {
+        if (fSaveGuestState)
+        {
+            /* We shouldn't reload CR0 without saving it first. */
             int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
             AssertRCReturn(rc, rc);
         }
-        CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
-        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     }
…
     RTThreadPreemptDisable(&PreemptState);
 
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (CPUMIsGuestFPUStateActive(pVCpu))
-        CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
-
+    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
     CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
 
…
 
     /* Restore the lazy host MSRs as we're leaving VT-x context. */
-    if (   pVM->hm.s.fAllow64BitGuests
-        && pVCpu->hm.s.vmx.fLazyMsrs)
+    if (   pVCpu->hm.s.vmx.fLazyMsrs
+        && pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
         hmR0VmxLazyRestoreHostMsrs(pVCpu);
 #endif
…
 #ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
     if (!CPUMIsGuestFPUStateActive(pVCpu))
-        CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
+        CPUMR0LoadGuestFPU(pVM, pVCpu);
     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
 #endif
…
         && !CPUMIsGuestFPUStateActive(pVCpu))
     {
-        CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
+        CPUMR0LoadGuestFPU(pVM, pVCpu);
         Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
…
 
 #ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
-    if (CPUMIsGuestFPUStateActive(pVCpu))
+    if (CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVM, pVCpu))
     {
         hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
-        CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     }
…
         Assert(!pVmxTransient->fWasGuestFPUStateActive || pVCpu->hm.s.fUsingDebugLoop);
 #endif
-        rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
+        rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu);
         Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
     }
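The preemption/longjmp-callback hunks show the canonical "leaving VT-x context" ordering after this change. A sketch of that callback shape under the same assumptions (the callback name is hypothetical; the two CPUMR0 calls are the ones from the diff):

    /* Sketch of a ring-0 'leaving HM context' callback after this change. */
    static void hmLeaveContextCallbackSketch(PVMCPU pVCpu)
    {
        /* One call now handles both FPU directions; no guest-context pointer
           and no separate CPUMIsGuestFPUStateActive() gate are needed. */
        CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
        /* ... then restore lazily swapped host MSRs, etc. ... */
    }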
trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm
(r60891 → r61058)

 ; loaded the GC FPU. Because if we have, this is an trap for the guest - raw ring-3.
 ;
-        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
+        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
         jz      hlfpua_not_loaded
         jmp     hlfpua_guest_trap
…
         mov     cr0, edx                ; Clear flags so we don't trap here.
 
+        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_HOST
+        jnz     hlfpua_host_done
+
         mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
         mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateRC]
…
 
 hlfpua_finished_switch:
-        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
+        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_HOST | CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM)
 
         ; Load new CR0 value.
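The raw-mode lazy #NM handler follows the same pattern, now skipping the host save when the host state is already stashed. In C-like pseudocode (a sketch; the actual handler is the assembly above):

    /* Sketch of the lazy #NM flow in CPUMRCA.asm after this change. */
    static int modelHandleLazyFpuSketch(PCPUMCPU pCpumCpu)
    {
        if (pCpumCpu->fUseFlags & CPUM_USED_FPU_GUEST)
            return VINF_EM_RAW_GUEST_TRAP;   /* guest state already loaded: a genuine guest #NM */

        if (!(pCpumCpu->fUseFlags & CPUM_USED_FPU_HOST))
        {
            /* save host FPU state (new: skipped when already saved) */
        }
        /* load guest FPU state */
        pCpumCpu->fUseFlags |= CPUM_USED_FPU_HOST | CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM;
        return VINF_SUCCESS;
    }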
trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac
(r58122 → r61058)

         ;; handle use flags.
         mov     esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
-        and     esi, ~CPUM_USED_FPU   ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
+        and     esi, ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST) ; Clear CPUM_USED_* flags.
         mov     [rdx + r8 + CPUMCPU.fUseFlags], esi
…
         ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
         mov     esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
-        test    esi, CPUM_USED_FPU
+        test    esi, (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)
         jz      gth_fpu_no
         mov     rcx, cr0
…
         mov     r10, rdx                ; Save rdx.
 
+        test    esi, CPUM_USED_FPU_GUEST
+        jz      gth_fpu_host
+
         mov     eax, [r10 + r8 + CPUMCPU.Guest.fXStateMask]
-        mov     r9, [r10 + r8 + CPUMCPU.Guest.pXStateR0]
+        mov     r9,  [r10 + r8 + CPUMCPU.Guest.pXStateR0]
         or      eax, eax
         jz      gth_fpu_guest_fxsave
…
 gth_fpu_host:
         mov     eax, [r10 + r8 + CPUMCPU.Host.fXStateMask]
-        mov     r9, [r10 + r8 + CPUMCPU.Host.pXStateR0]
+        mov     r9,  [r10 + r8 + CPUMCPU.Host.pXStateR0]
         or      eax, eax
         jz      gth_fpu_host_fxrstor
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
(r58123 → r61058)

         mov     cr0, rcx                ; and restore old CR0 again
 
-        and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
+        and     esi, ~CPUM_SYNC_FPU_STATE
+        or      esi, CPUM_USED_FPU_GUEST
+        mov     [rdx + CPUMCPU.fUseFlags], esi
 
 htg_fpu_no:
…
         mov     dr6, rax                ; not required for AMD-V
 
-        and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_GUEST
-        or      dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_GUEST
+        and     esi, ~CPUM_SYNC_DEBUG_REGS_GUEST
+        or      esi, CPUM_USED_DEBUG_REGS_GUEST
+        mov     [rdx + CPUMCPU.fUseFlags], esi
         jmp     htg_debug_done
…
         mov     dr6, rax                ; not required for AMD-V
 
-        and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_HYPER
-        or      dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
+        and     esi, ~CPUM_SYNC_DEBUG_REGS_HYPER
+        or      esi, CPUM_USED_DEBUG_REGS_HYPER
+        mov     [rdx + CPUMCPU.fUseFlags], esi
 
 htg_debug_done:
…
 ;
 
-        ; parameter for all helper functions (pCtx)
+        ; parameter for all helper functions (pCtx) (in addition to rdx = pCPUM ofc)
         DEBUG64_CHAR('9')
         lea     rsi, [rdx + CPUMCPU.Guest]
…
 ; * @returns VBox status code
 ; * @param   pCtx         Guest context [rsi]
+; * @param   pCPUM        Pointer to CPUMCPU [rdx]
 ; */
 BEGINPROC HMRCSaveGuestFPU64
…
         mov     edx, [rsi + CPUMCTX.fXStateMask + 4]
         o64 xsave  [rbx]
         jmp     .done
 
     .use_fxsave:
…
     .done:
         mov     cr0, rcx                ; and restore old CR0 again
+
+        and     [rdx + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_GUEST
 
         mov     eax, VINF_SUCCESS
trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac
(r56287 → r61058)

         ;; handle use flags.
         mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
-        and     esi, ~CPUM_USED_FPU   ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
+        and     esi, ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST) ; Clear CPUM_USED_* flags.
         mov     [edx + CPUMCPU.fUseFlags], esi
…
         ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
         mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
-        test    esi, CPUM_USED_FPU
+        test    esi, (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)
         jz      near gth_fpu_no
         mov     ecx, cr0
…
 
         mov     ebx, edx                ; save edx
+
+        test    esi, CPUM_USED_FPU_GUEST
+        jz      gth_fpu_host
 
         mov     eax, [ebx + CPUMCPU.Guest.fXStateMask]
trunk/src/VBox/VMM/include/CPUMInternal.h
(r58996 → r61058)

 /** Use flags (CPUM::fUseFlags).
  * (Don't forget to sync this with CPUMInternal.mac !)
+ * @note Part of saved state.
  * @{ */
-/** Used the FPU, SSE or such stuff. */
-#define CPUM_USED_FPU                   RT_BIT(0)
-/** Used the FPU, SSE or such stuff since last we were in REM.
+/** Indicates that we've saved the host FPU, SSE, whatever state and that it
+ * needs to be restored. */
+#define CPUM_USED_FPU_HOST              RT_BIT(0)
+/** Indicates that we've loaded the guest FPU, SSE, whatever state and that it
+ * needs to be saved. */
+#define CPUM_USED_FPU_GUEST             RT_BIT(10)
+/** Used the guest FPU, SSE or such stuff since last we were in REM.
  * REM syncing is clearing this, lazy FPU is setting it. */
 #define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
…
  * DR7 (and AMD-V DR6) are handled via the VMCB. */
 #define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
-
 
 /** Sync the FPU state on next entry (32->64 switcher only). */
…
 PCPUMCPUIDLEAF      cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit);
 
-#ifdef IN_RING3
+# ifdef IN_RING3
 int                 cpumR3DbgInit(PVM pVM);
 int                 cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures);
…
 int                 cpumR3MsrStrictInitChecks(void);
 PCPUMMSRRANGE       cpumLookupMsrRange(PVM pVM, uint32_t idMsr);
-#endif
-
-#ifdef IN_RC
+# endif
+
+# ifdef IN_RC
 DECLASM(int)        cpumHandleLazyFPUAsm(PCPUMCPU pCPUM);
-#endif
-
-#ifdef IN_RING0
-DECLASM(int)        cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM);
-DECLASM(int)        cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
-DECLASM(int)        cpumR0SaveHostFPUState(PCPUMCPU pCPUM);
-DECLASM(int)        cpumR0RestoreHostFPUState(PCPUMCPU pCPUM);
-DECLASM(void)       cpumR0LoadFPU(PCPUMCTX pCtx);
-DECLASM(void)       cpumR0SaveFPU(PCPUMCTX pCtx);
-DECLASM(void)       cpumR0LoadXMM(PCPUMCTX pCtx);
-DECLASM(void)       cpumR0SaveXMM(PCPUMCTX pCtx);
-DECLASM(void)       cpumR0SetFCW(uint16_t u16FCW);
-DECLASM(uint16_t)   cpumR0GetFCW(void);
-DECLASM(void)       cpumR0SetMXCSR(uint32_t u32MXCSR);
-DECLASM(uint32_t)   cpumR0GetMXCSR(void);
-#endif
+# endif
+
+# ifdef IN_RING0
+DECLASM(void)       cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM);
+DECLASM(void)       cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
+DECLASM(void)       cpumR0SaveHostFPUState(PCPUMCPU pCPUM);
+#  if ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
+DECLASM(void)       cpumR0RestoreHostFPUState(PCPUMCPU pCPUM);
+#  endif
+# endif
 
 RT_C_DECLS_END
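Note that the two FPU flags occupy bit 0 and bit 10 (bits 1 through 9 are already taken by the other CPUM_USED_*/CPUM_SYNC_* flags in this header), so all four combinations are representable and each flag can be tested independently; e.g.:

    /* Sketch: querying the two independent flags. */
    uint32_t const fUse         = pVCpu->cpum.s.fUseFlags;
    bool const     fHostSaved   = RT_BOOL(fUse & CPUM_USED_FPU_HOST);   /* restore before leaving ring-0 */
    bool const     fGuestLoaded = RT_BOOL(fUse & CPUM_USED_FPU_GUEST);  /* save before the state is clobbered */
    /* Host-saved without guest-loaded occurs legitimately, e.g. on 32-bit
       hosts running 64-bit guests, where the guest load is deferred to the
       world switcher via CPUM_SYNC_FPU_STATE. */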
trunk/src/VBox/VMM/include/CPUMInternal.mac
(r57446 → r61058)

 
 
-%define CPUM_USED_FPU                   RT_BIT(0)
+%define CPUM_USED_FPU_HOST              RT_BIT(0)
+%define CPUM_USED_FPU_GUEST             RT_BIT(10)
 %define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
 %define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
