Changeset 19992 in vbox
- Timestamp:
- May 25, 2009 11:31:23 AM (15 years ago)
- Location:
- trunk
- Files:
-
- 8 edited
-
include/VBox/iom.h (modified) (1 diff)
-
include/VBox/pgm.h (modified) (1 diff)
-
src/VBox/VMM/HWACCMInternal.h (modified) (2 diffs)
-
src/VBox/VMM/VMMAll/EMAll.cpp (modified) (1 diff)
-
src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (modified) (1 diff)
-
src/VBox/VMM/VMMAll/PGMAllHandler.cpp (modified) (2 diffs)
-
src/VBox/VMM/VMMR0/HWSVMR0.cpp (modified) (2 diffs)
-
src/VBox/VMM/VMMR0/HWVMXR0.cpp (modified) (12 diffs)
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/iom.h
r19807 r19992 215 215 VMMDECL(int) IOMInterpretCheckPortIOAccess(PVM pVM, PCPUMCTXCORE pCtxCore, RTIOPORT Port, unsigned cb); 216 216 VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags); 217 VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags); 217 218 VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys); 218 219 VMMDECL(bool) IOMIsLockOwner(PVM pVM); -
trunk/include/VBox/pgm.h
r19682 r19992 345 345 VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage); 346 346 VMMDECL(int) PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap); 347 VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap); 347 348 VMMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys); 348 349 VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys); -
trunk/src/VBox/VMM/HWACCMInternal.h
r19910 r19992 259 259 R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable; 260 260 261 /** R0 memory object for the virtual APIC mmio cache. */261 /** R0 memory object for the APIC physical page (serves for filtering accesses). */ 262 262 RTR0MEMOBJ pMemObjAPIC; 263 /** Physical address of the virtual APIC mmio cache. */263 /** Physical address of the APIC physical page (serves for filtering accesses). */ 264 264 RTHCPHYS pAPICPhys; 265 /** Virtual address of the virtual APIC mmio cache. */265 /** Virtual address of the APIC physical page (serves for filtering accesses). */ 266 266 R0PTRTYPE(uint8_t *) pAPIC; 267 267 … … 489 489 /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */ 490 490 uint64_t proc_ctls; 491 492 /** Current VMX_VMCS_CTRL_PROC_EXEC2_CONTROLS. */ 493 uint64_t proc_ctls2; 494 495 /** R0 memory object for the virtual APIC page for TPR caching. */ 496 RTR0MEMOBJ pMemObjVAPIC; 497 /** Physical address of the virtual APIC page for TPR caching. */ 498 RTHCPHYS pVAPICPhys; 499 /** Virtual address of the virtual APIC page for TPR caching. */ 500 R0PTRTYPE(uint8_t *) pVAPIC; 491 501 492 502 /** Current CR0 mask. */ -
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
r19808 r19992 2903 2903 rc = PDMApicReadMSR(pVM, pVCpu->idCpu, pRegFrame->ecx, &val); 2904 2904 else 2905 /* We should actually trigger a #GP here, but don't as that mightcause more trouble. */2905 /* We should actually trigger a #GP here, but don't as that will cause more trouble. */ 2906 2906 val = 0; 2907 2907 break; -
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
r19677 r19992 1884 1884 } 1885 1885 1886 /** 1887 * Mapping a HC page in place of an MMIO page for direct access. 1888 * 1889 * (This is a special optimization used by the APIC in the VT-x case.) 1890 * 1891 * @returns VBox status code. 1892 * 1893 * @param pVM The virtual machine. 1894 * @param GCPhys The address of the MMIO page to be changed. 1895 * @param HCPhys The address of the host physical page. 1896 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P) 1897 * for the time being. 1898 */ 1899 VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags) 1900 { 1901 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags)); 1902 1903 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER); 1904 Assert(HWACCMIsEnabled(pVM)); 1905 1906 PVMCPU pVCpu = VMMGetCpu(pVM); 1907 1908 /* 1909 * Lookup the context range node the page belongs to. 1910 */ 1911 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys); 1912 AssertMsgReturn(pRange, 1913 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), 1914 VERR_IOM_MMIO_RANGE_NOT_FOUND); 1915 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0); 1916 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK); 1917 1918 /* 1919 * Do the aliasing; page align the addresses since PGM is picky. 1920 */ 1921 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK; 1922 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK; 1923 1924 int rc = PGMHandlerPhysicalPageAliasHC(pVM, pRange->GCPhys, GCPhys, HCPhys); 1925 AssertRCReturn(rc, rc); 1926 1927 /* 1928 * Modify the shadow page table. Since it's an MMIO page it won't be present and we 1929 * can simply prefetch it. 1930 * 1931 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page. 
1932 */ 1933 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys); 1934 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT); 1935 return VINF_SUCCESS; 1936 } 1886 1937 1887 1938 /** -
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
r19903 r19992 1010 1010 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage), 1011 1011 VERR_PGM_PHYS_NOT_MMIO2); 1012 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPage ))1012 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap)) 1013 1013 return VINF_PGM_HANDLER_ALREADY_ALIASED; 1014 1014 … … 1035 1035 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED); 1036 1036 LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage)); 1037 1038 #ifndef IN_RC 1039 HWACCMInvalidatePhysPage(pVM, GCPhysPage); 1040 #endif 1041 return VINF_SUCCESS; 1042 } 1043 1044 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", 1045 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast)); 1046 return VERR_INVALID_PARAMETER; 1047 } 1048 1049 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys)); 1050 return VERR_PGM_HANDLER_NOT_FOUND; 1051 } 1052 1053 /** 1054 * Replaces an MMIO page with an arbitrary HC page. 1055 * 1056 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to 1057 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no 1058 * backing, the caller must provide a replacement page. For various reasons the 1059 * replacement page must be an MMIO2 page. 1060 * 1061 * The caller must do required page table modifications. You can get away 1062 * without making any modifations since it's an MMIO page, the cost is an extra 1063 * \#PF which will the resync the page. 1064 * 1065 * Call PGMHandlerPhysicalReset() to restore the MMIO page. 1066 * 1067 * The caller may still get handler callback even after this call and must be 1068 * able to deal correctly with such calls. The reason for these callbacks are 1069 * either that we're executing in the recompiler (which doesn't know about this 1070 * arrangement) or that we've been restored from saved state (where we won't 1071 * save the change). 1072 * 1073 * @returns VBox status code. 
1074 * @param pVM The VM handle 1075 * @param GCPhys The start address of the access handler. This 1076 * must be a fully page aligned range or we risk 1077 * messing up other handlers installed for the 1078 * start and end pages. 1079 * @param GCPhysPage The physical address of the page to turn off 1080 * access monitoring for. 1081 * @param HCPhysPageRemap The physical address of the HC page that 1082 * serves as backing memory. 1083 * 1084 * @remark May cause a page pool flush if used on a page that is already 1085 * aliased. 1086 */ 1087 VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap) 1088 { 1089 /// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */ 1090 1091 /* 1092 * Lookup and validate the range. 1093 */ 1094 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys); 1095 if (RT_LIKELY(pCur)) 1096 { 1097 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key 1098 && GCPhysPage <= pCur->Core.KeyLast)) 1099 { 1100 AssertReturn(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, VERR_ACCESS_DENIED); 1101 AssertReturn(!(pCur->Core.Key & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER); 1102 AssertReturn((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, VERR_INVALID_PARAMETER); 1103 1104 /* 1105 * Get and validate the pages. 1106 */ 1107 PPGMPAGE pPage; 1108 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage); 1109 AssertRCReturn(rc, rc); 1110 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO) 1111 { 1112 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO, 1113 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage), 1114 VERR_PGM_PHYS_NOT_MMIO2); 1115 return VINF_PGM_HANDLER_ALREADY_ALIASED; 1116 } 1117 Assert(PGM_PAGE_IS_ZERO(pPage)); 1118 1119 /* 1120 * Do the actual remapping here. 1121 * This page now serves as an alias for the backing memory specified. 
1122 */ 1123 LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %HGp\n", 1124 GCPhysPage, pPage, HCPhysPageRemap)); 1125 PGM_PAGE_SET_HCPHYS(pPage, HCPhysPageRemap); 1126 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO); 1127 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED); 1128 /** @todo hack alert */ 1129 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID); 1130 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED); 1131 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage)); 1037 1132 1038 1133 #ifndef IN_RC -
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r19976 r19992 769 769 770 770 /* 64 bits guest mode? */ 771 if ( pCtx->msrEFER & MSR_K6_EFER_LMA)771 if (CPUMIsGuestInLongModeEx(pCtx)) 772 772 { 773 773 #if !defined(VBOX_ENABLE_64_BITS_GUESTS) … … 942 942 /* Note the 32 bits exception for AMD (X86_CPUID_AMD_FEATURE_ECX_CR8L), but that appears missing in Intel CPUs */ 943 943 /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! */ 944 if ( pCtx->msrEFER & MSR_K6_EFER_LMA)944 if (CPUMIsGuestInLongModeEx(pCtx)) 945 945 { 946 946 bool fPending; -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r19986 r19992 180 180 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 181 181 { 182 /* Allocate one page for the virtual APIC mmio cache. */182 /* Allocate one page for the APIC physical page (serves for filtering accesses). */ 183 183 rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjAPIC, 1 << PAGE_SHIFT, true /* executable R0 mapping */); 184 184 AssertRC(rc); … … 245 245 pVCpu->hwaccm.s.vmx.cr0_mask = 0; 246 246 pVCpu->hwaccm.s.vmx.cr4_mask = 0; 247 248 /* Allocate one page for the virtual APIC page for TPR caching. */ 249 rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjVAPIC, 1 << PAGE_SHIFT, true /* executable R0 mapping */); 250 AssertRC(rc); 251 if (RT_FAILURE(rc)) 252 return rc; 253 254 pVCpu->hwaccm.s.vmx.pVAPIC = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjVAPIC); 255 pVCpu->hwaccm.s.vmx.pVAPICPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjVAPIC, 0); 256 ASMMemZero32(pVCpu->hwaccm.s.vmx.pVAPIC, PAGE_SIZE); 247 257 248 258 /* Current guest paging mode. 
*/ … … 267 277 for (unsigned i=0;i<pVM->cCPUs;i++) 268 278 { 269 if (pVM->aCpus[i].hwaccm.s.vmx.pMemObjVMCS != NIL_RTR0MEMOBJ) 270 { 271 RTR0MemObjFree(pVM->aCpus[i].hwaccm.s.vmx.pMemObjVMCS, false); 272 pVM->aCpus[i].hwaccm.s.vmx.pMemObjVMCS = NIL_RTR0MEMOBJ; 273 pVM->aCpus[i].hwaccm.s.vmx.pVMCS = 0; 274 pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys = 0; 279 PVMCPU pVCpu = &pVM->aCpus[i]; 280 281 if (pVCpu->hwaccm.s.vmx.pMemObjVMCS != NIL_RTR0MEMOBJ) 282 { 283 RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjVMCS, false); 284 pVCpu->hwaccm.s.vmx.pMemObjVMCS = NIL_RTR0MEMOBJ; 285 pVCpu->hwaccm.s.vmx.pVMCS = 0; 286 pVCpu->hwaccm.s.vmx.pVMCSPhys = 0; 287 } 288 if (pVCpu->hwaccm.s.vmx.pMemObjVAPIC != NIL_RTR0MEMOBJ) 289 { 290 RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjVAPIC, false); 291 pVCpu->hwaccm.s.vmx.pMemObjVAPIC = NIL_RTR0MEMOBJ; 292 pVCpu->hwaccm.s.vmx.pVAPIC = 0; 293 pVCpu->hwaccm.s.vmx.pVAPICPhys = 0; 275 294 } 276 295 } … … 415 434 /** @todo make sure they don't conflict with the above requirements. */ 416 435 val &= pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; 417 436 pVCpu->hwaccm.s.vmx.proc_ctls2 = val; 418 437 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2, val); 419 438 AssertRC(rc); … … 475 494 /* Optional */ 476 495 rc = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, 0); 477 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVM->hwaccm.s.vmx.pAPICPhys); 496 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hwaccm.s.vmx.pVAPICPhys); 497 498 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 499 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL, pVM->hwaccm.s.vmx.pAPICPhys); 500 478 501 AssertRC(rc); 479 502 } … … 1142 1165 #endif 1143 1166 /* 64 bits guest mode? */ 1144 if ( pCtx->msrEFER & MSR_K6_EFER_LMA)1167 if (CPUMIsGuestInLongModeEx(pCtx)) 1145 1168 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE; 1146 1169 /* else Must be zero when AMD64 is not available. 
*/ … … 1168 1191 /* else: Must be zero when AMD64 is not available. */ 1169 1192 #elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 1170 if ( pCtx->msrEFER & MSR_K6_EFER_LMA)1193 if (CPUMIsGuestInLongModeEx(pCtx)) 1171 1194 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64; /* our switcher goes to long mode */ 1172 1195 else … … 1623 1646 1624 1647 /* 64 bits guest mode? */ 1625 if ( pCtx->msrEFER & MSR_K6_EFER_LMA)1648 if (CPUMIsGuestInLongModeEx(pCtx)) 1626 1649 { 1627 1650 #if !defined(VBOX_ENABLE_64_BITS_GUESTS) … … 2140 2163 * @todo reduce overhead 2141 2164 */ 2142 if ( (pCtx->msrEFER & MSR_K6_EFER_LMA)2165 if ( CPUMIsGuestInLongModeEx(pCtx) 2143 2166 && pVM->hwaccm.s.vmx.pAPIC) 2144 2167 { … … 2904 2927 /* If the page is present, then it's a page level protection fault. */ 2905 2928 if (exitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT) 2929 { 2906 2930 errCode |= X86_TRAP_PF_P; 2907 2931 2932 #if 0 2933 /* Shortcut for APIC TPR reads and writes; 32 bits guests only */ 2934 if ( (GCPhys & 0xfff) == 0x080 2935 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */ 2936 && !CPUMIsGuestInLongModeEx(pCtx) 2937 && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) 2938 { 2939 RTGCPHYS GCPhysApicBase; 2940 PDMApicGetBase(pVM, &GCPhysApicBase); /* @todo cache this */ 2941 if (GCPhys == GCPhysApicBase + 0x80) 2942 { 2943 pVCpu->hwaccm.s.vmx.proc_ctls2 |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; 2944 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2, val); 2945 AssertRC(rc); 2946 } 2947 } 2948 #endif 2949 } 2908 2950 LogFlow(("EPT Page fault %x at %RGp error code %x\n", (uint32_t)exitQualification, GCPhys, errCode)); 2909 2951 … … 3376 3418 PDMApicGetBase(pVM, &GCPhys); 3377 3419 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(exitQualification); 3420 3421 Log(("Apic access at %RGp\n", GCPhys)); 3378 3422 rc = VINF_EM_RAW_EMULATE_INSTR; 3379 3423 break; … … 4114 4158 case 
VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL: 4115 4159 case VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL: 4160 case VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL: 4116 4161 case VMX_VMCS_GUEST_LINK_PTR_FULL: 4117 4162 case VMX_VMCS_GUEST_PDPTR0_FULL:
Note: See TracChangeset for help on using the changeset viewer.

