VirtualBox

Changeset 9452 in vbox


Ignore:
Timestamp:
Jun 6, 2008 9:17:18 AM (16 years ago)
Author:
vboxsync
Message:

Preparing for 64-bit vmlaunch/vmresume.

Location:
trunk
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/hwacc_vmx.h

    r8155 r9452  
    10141014
    10151015/**
    1016  * Prepares for and executes VMLAUNCH
    1017  *
    1018  * @returns VBox status code
    1019  * @param   pCtx        Guest context
    1020  */
    1021 DECLASM(int) VMXStartVM(PCPUMCTX pCtx);
    1022 
    1023 /**
    1024  * Prepares for and executes VMRESUME
    1025  *
    1026  * @returns VBox status code
    1027  * @param   pCtx        Guest context
    1028  */
    1029 DECLASM(int) VMXResumeVM(PCPUMCTX pCtx);
    1030 
    1031 /**
    10321016 * Gets the last instruction error value from the current VMCS
    10331017 *
  • trunk/src/VBox/VMM/HWACCMInternal.h

    r9410 r9452  
    194194        /** Virtual address of the TSS page used for real mode emulation. */
    195195        R0PTRTYPE(PVBOXTSS)         pRealModeTSS;
     196
     197        /** Ring 0 handlers for VT-x. */
     198        DECLR0CALLBACKMEMBER(int, pfnStartVM,(RTHCUINT fResume, PCPUMCTX pCtx));
    196199
    197200        /** Host CR4 value (set by ring-0 VMX init) */
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm

    r9161 r9452  
    178178
    179179;/**
    180 ; * Prepares for and executes VMLAUNCH
    181 ; *
    182 ; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode
    183 ; *
    184 ; * @returns VBox status code
    185 ; * @param   pCtx        Guest context
    186 ; */
    187 BEGINPROC VMXStartVM
     180; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
     181; *
     182; * @returns VBox status code
     183; * @param   fResume    vmlaunch/vmresume
     184; * @param   pCtx       Guest context
     185; */
     186BEGINPROC VMXR0StartVM32
    188187    push    xBP
    189188    mov     xBP, xSP
     
    201200    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    202201    vmwrite xAX, [xSP]
    203     ;/* @todo assumes success... */
     202    ;/* Note: assumes success... */
    204203    add     xSP, xS
    205204
     
    225224%ifdef RT_ARCH_AMD64
    226225 %ifdef ASM_CALL64_GCC
    227     mov     rsi, rdi ; pCtx
     226    ; fResume already in rdi
     227    ; pCtx    already in rsi
    228228 %else
    229     mov     rsi, rcx ; pCtx
     229    mov     rdi, rcx        ; fResume
     230    mov     rsi, rdx        ; pCtx
    230231 %endif
    231232%else
    232     mov     esi, [ebp + 8] ; pCtx
     233    mov     edi, [ebp + 8]  ; fResume
     234    mov     esi, [ebp + 12] ; pCtx
    233235%endif
    234236    push    xSI
     
    258260    mov     eax, VMX_VMCS_HOST_RSP
    259261    vmwrite xAX, xSP
    260     ;/* @todo assumes success... */
     262    ;/* Note: assumes success... */
    261263    ;/* Don't mess with ESP anymore!! */
    262264
     
    266268    mov     ecx, [xSI + CPUMCTX.ecx]
    267269    mov     edx, [xSI + CPUMCTX.edx]
     270    mov     ebp, [xSI + CPUMCTX.ebp]
     271
     272    ; resume or start?
     273    cmp     xDI, 0                  ; fResume
     274    je      .vmlauch_lauch
     275
     276    ;/* Restore edi & esi. */
    268277    mov     edi, [xSI + CPUMCTX.edi]
    269     mov     ebp, [xSI + CPUMCTX.ebp]
     278    mov     esi, [xSI + CPUMCTX.esi]
     279
     280    vmresume
     281    jmp     .vmlaunch_done;      ;/* here if vmresume detected a failure. */
     282   
     283.vmlauch_lauch:   
     284    ;/* Restore edi & esi. */
     285    mov     edi, [xSI + CPUMCTX.edi]
    270286    mov     esi, [xSI + CPUMCTX.esi]
    271287
     
    365381    jmp     .vmstart_end
    366382
    367 ENDPROC VMXStartVM
    368 
    369 
    370 ;/**
    371 ; * Prepares for and executes VMRESUME
    372 ; *
    373 ; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode
    374 ; *
    375 ; * @returns VBox status code
    376 ; * @param   pCtx        Guest context
    377 ; */
    378 BEGINPROC VMXResumeVM
    379     push    xBP
    380     mov     xBP, xSP
    381 
    382     pushf
    383     cli
    384 
    385     ;/* First we have to save some final CPU context registers. */
    386 %ifdef RT_ARCH_AMD64
    387     mov     rax, qword .vmresume_done
    388     push    rax
    389 %else
    390     push    .vmresume_done
    391 %endif
    392     mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    393     vmwrite xAX, [xSP]
    394     ;/* @todo assumes success... */
    395     add     xSP, xS
    396 
    397     ;/* Manual save and restore:
    398     ; * - General purpose registers except RIP, RSP
    399     ; *
    400     ; * Trashed:
    401     ; * - CR2 (we don't care)
    402     ; * - LDTR (reset to 0)
    403     ; * - DRx (presumably not changed at all)
    404     ; * - DR7 (reset to 0x400)
    405     ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    406     ; *
    407     ; */
    408 
    409     ;/* Save all general purpose host registers. */
    410     MYPUSHAD
    411 
    412     ;/* Save segment registers */
    413     MYPUSHSEGS xAX, ax
    414 
    415     ;/* Save the Guest CPU context pointer. */
    416 %ifdef RT_ARCH_AMD64
    417  %ifdef ASM_CALL64_GCC
    418     mov     rsi, rdi        ; pCtx
    419  %else
    420     mov     rsi, rcx        ; pCtx
    421  %endif
    422 %else
    423     mov     esi, [ebp + 8]  ; pCtx
    424 %endif
    425     push    xSI
    426 
    427     ; Save LDTR
    428     xor     eax, eax
    429     sldt    ax
    430     push    xAX
    431 
    432     ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    433     sub     xSP, xS*2
    434     sgdt    [xSP]
    435 
    436     sub     xSP, xS*2
    437     sidt    [xSP]
    438 
    439 %ifdef VBOX_WITH_DR6_EXPERIMENT
    440     ; Restore DR6 - experiment, not safe!
    441     mov     xBX, [xSI + CPUMCTX.dr6]
    442     mov     dr6, xBX
    443 %endif
    444 
    445     ; Restore CR2
    446     mov     xBX, [xSI + CPUMCTX.cr2]
    447     mov     cr2, xBX
    448 
    449     mov     eax, VMX_VMCS_HOST_RSP
    450     vmwrite xAX, xSP
    451     ;/* @todo assumes success... */
    452     ;/* Don't mess with ESP anymore!! */
    453 
    454     ;/* Restore Guest's general purpose registers. */
    455     mov     eax, [xSI + CPUMCTX.eax]
    456     mov     ebx, [xSI + CPUMCTX.ebx]
    457     mov     ecx, [xSI + CPUMCTX.ecx]
    458     mov     edx, [xSI + CPUMCTX.edx]
    459     mov     edi, [xSI + CPUMCTX.edi]
    460     mov     ebp, [xSI + CPUMCTX.ebp]
    461     mov     esi, [xSI + CPUMCTX.esi]
    462 
    463     vmresume
    464     jmp     .vmresume_done;      ;/* here if vmresume detected a failure. */
    465 
    466 ALIGNCODE(16)
    467 .vmresume_done:
    468     jc      near .vmxresume_invalid_vmxon_ptr
    469     jz      near .vmxresume_start_failed
    470 
    471     ; Restore base and limit of the IDTR & GDTR
    472     lidt    [xSP]
    473     add     xSP, xS*2
    474     lgdt    [xSP]
    475     add     xSP, xS*2
    476 
    477     push    xDI
    478     mov     xDI, [xSP + xS * 2]         ; pCtx
    479 
    480     mov     [ss:xDI + CPUMCTX.eax], eax
    481     mov     [ss:xDI + CPUMCTX.ebx], ebx
    482     mov     [ss:xDI + CPUMCTX.ecx], ecx
    483     mov     [ss:xDI + CPUMCTX.edx], edx
    484     mov     [ss:xDI + CPUMCTX.esi], esi
    485     mov     [ss:xDI + CPUMCTX.ebp], ebp
    486 %ifdef RT_ARCH_AMD64
    487     pop     xAX                                 ; the guest edi we pushed above
    488     mov     dword [ss:xDI + CPUMCTX.edi], eax
    489 %else
    490     pop     dword [ss:xDI + CPUMCTX.edi]        ; the guest edi we pushed above
    491 %endif
    492 
    493 %ifdef VBOX_WITH_DR6_EXPERIMENT
    494     ; Save DR6 - experiment, not safe!
    495     mov     xAX, dr6
    496     mov     [ss:xDI + CPUMCTX.dr6], xAX
    497 %endif
    498 
    499     pop     xAX          ; saved LDTR
    500     lldt    ax
    501 
    502     add     xSP, xS      ; pCtx
    503 
    504     ; Restore segment registers
    505     MYPOPSEGS xAX, ax
    506 
    507     ; Restore general purpose registers
    508     MYPOPAD
    509 
    510     mov     eax, VINF_SUCCESS
    511 
    512 .vmresume_end:
    513     popf
    514     pop     xBP
    515     ret
    516 
    517 .vmxresume_invalid_vmxon_ptr:
    518     ; Restore base and limit of the IDTR & GDTR
    519     lidt    [xSP]
    520     add     xSP, xS*2
    521     lgdt    [xSP]
    522     add     xSP, xS*2
    523 
    524     pop     xAX         ; saved LDTR
    525     lldt    ax
    526 
    527     add     xSP, xS     ; pCtx
    528 
    529     ; Restore segment registers
    530     MYPOPSEGS xAX, ax
    531 
    532     ; Restore all general purpose host registers.
    533     MYPOPAD
    534     mov     eax, VERR_VMX_INVALID_VMXON_PTR
    535     jmp     .vmresume_end
    536 
    537 .vmxresume_start_failed:
    538     ; Restore base and limit of the IDTR & GDTR
    539     lidt    [xSP]
    540     add     xSP, xS*2
    541     lgdt    [xSP]
    542     add     xSP, xS*2
    543 
    544     pop     xAX         ; saved LDTR
    545     lldt    ax
    546 
    547     add     xSP, xS     ; pCtx
    548 
    549     ; Restore segment registers
    550     MYPOPSEGS xAX, ax
    551 
    552     ; Restore all general purpose host registers.
    553     MYPOPAD
    554     mov     eax, VERR_VMX_UNABLE_TO_RESUME_VM
    555     jmp     .vmresume_end
    556 
    557 ENDPROC VMXResumeVM
    558 
    559 
    560 %ifdef RT_ARCH_AMD64
     383ENDPROC VMXR0StartVM32
     384
     385%ifdef RT_ARCH_AMD64
     386;/**
     387; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
     388; *
     389; * @returns VBox status code
     390; * @param   fResume    vmlaunch/vmresume
     391; * @param   pCtx       Guest context
     392; */
     393BEGINPROC VMXR0StartVM64
     394    ret
     395ENDPROC VMXR0StartVM64
     396
    561397;/**
    562398; * Executes VMWRITE
    563399; *
    564400; * @returns VBox status code
    565 ; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: edi   VMCS index
     401; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi   VMCS index
    566402; * @param   pData      x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
    567403; */
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r9411 r9452  
    642642            case PGMMODE_PROTECTED:     /* Protected mode, no paging. */
    643643                AssertFailed();
    644                 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
     644                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    645645
    646646            case PGMMODE_32_BIT:        /* 32-bit paging. */
     
    659659#else
    660660                AssertFailed();
    661                 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
     661                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    662662#endif
    663663
    664664            default:                    /* shut up gcc */
    665665                AssertFailed();
    666                 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
     666                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    667667            }
    668668        }
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r9421 r9452  
    841841#else
    842842            AssertFailed();
    843             return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
     843            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    844844#endif
    845845        default:                   /* shut up gcc */
    846846            AssertFailed();
    847             return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
     847            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    848848        }
    849849        /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
     
    960960    /* 64 bits guest mode? */
    961961    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
     962    {
    962963        val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
     964#ifndef VBOX_WITH_64_BITS_GUESTS
     965        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     966#else
     967        pVM->hwaccm.s.vmx.pfnStartVM  = VMXR0StartVM64;
     968#endif
     969    }
     970    else
     971    {
     972        pVM->hwaccm.s.vmx.pfnStartVM  = VMXR0StartVM32;
     973    }
    963974
    964975    /* Done. */
     
    11581169    /* All done! Let's start VM execution. */
    11591170    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x);
    1160     if (pVM->hwaccm.s.vmx.fResumeVM == false)
    1161         rc = VMXStartVM(pCtx);
    1162     else
    1163         rc = VMXResumeVM(pCtx);
     1171    rc = pVM->hwaccm.s.vmx.pfnStartVM(pVM->hwaccm.s.vmx.fResumeVM, pCtx);
    11641172
    11651173    /* In case we execute a goto ResumeExecution later on. */
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.h

    r9421 r9452  
    162162
    163163
     164
     165/**
     166 * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
     167 *
     168 * @returns VBox status code
     169 * @param   fResume     vmlaunch/vmresume
     170 * @param   pCtx        Guest context
     171 */
     172DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);
     173
     174/**
     175 * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
     176 *
     177 * @returns VBox status code
     178 * @param   fResume     vmlaunch/vmresume
     179 * @param   pCtx        Guest context
     180 */
     181DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx);
     182
    164183#endif /* IN_RING0 */
    165184
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette