Changeset 19776 in vbox

Timestamp: May 18, 2009, 11:29:24 AM
File:      1 edited
           trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (modified) (11 diffs)

Legend: unmodified lines carry a leading space, added lines a leading "+", removed lines a leading "-"; a lone "…" marks collapsed unchanged lines.
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r19775)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r19776)

@@ r19775:150 r19776:150 @@
 # endif /* !PGM_WITH_PAGING */
 
-
-# if PGM_SHW_TYPE == PGM_TYPE_32BIT
-    const unsigned iPDDst = pvFault >> SHW_PD_SHIFT;
-    PX86PD pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
-
-# elif PGM_SHW_TYPE == PGM_TYPE_PAE
-    const unsigned iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
-
-    PX86PDPAE pPDDst;
-#  if PGM_GST_TYPE != PGM_TYPE_PAE
-    X86PDPE PdpeSrc;
-
-    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
-    PdpeSrc.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
-#  endif
-    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
-    if (rc != VINF_SUCCESS)
-    {
-        AssertRC(rc);
-        return rc;
-    }
-    Assert(pPDDst);
-
-# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
-    const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
-    PX86PDPAE pPDDst;
-#  if PGM_GST_TYPE == PGM_TYPE_PROT
-    /* AMD-V nested paging */
-    X86PML4E Pml4eSrc;
-    X86PDPE PdpeSrc;
-    PX86PML4E pPml4eSrc = &Pml4eSrc;
-
-    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
-    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
-    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
-#  endif
-
-    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
-    if (rc != VINF_SUCCESS)
-    {
-        AssertRC(rc);
-        return rc;
-    }
-    Assert(pPDDst);
-
-# elif PGM_SHW_TYPE == PGM_TYPE_EPT
-    const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
-    PEPTPD pPDDst;
-
-    rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
-    if (rc != VINF_SUCCESS)
-    {
-        AssertRC(rc);
-        return rc;
-    }
-    Assert(pPDDst);
-# endif
-
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    /*
-     * If we successfully correct the write protection fault due to dirty bit
-     * tracking, or this page fault is a genuine one, then return immediately.
-     */
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-    rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-    if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
-        ||  rc == VINF_EM_RAW_GUEST_TRAP)
-    {
-        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
-                     = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
-        LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
-        return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
-    }
-
-    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0ePD[iPDSrc]);
-# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-
-    /*
-     * A common case is the not-present error caused by lazy page table syncing.
-     *
-     * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
-     * so we can safely assume that the shadow PT is present when calling SyncPage later.
-     *
-     * On failure, we ASSUME that SyncPT is out of memory or detected some kind
-     * of mapping conflict and defer to SyncCR3 in R3.
-     * (Again, we do NOT support access handlers for non-present guest pages.)
-     *
-     */
+    /* Fetch the guest PDE */
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
…
     PdeSrc.n.u1User = 1;
 # endif
-    if (    !(uErr & X86_TRAP_PF_P)  /* not set means page not present instead of page protection violation */
-        &&  !pPDDst->a[iPDDst].n.u1Present
-        &&  PdeSrc.n.u1Present
-       )
-
-    {
-        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
-        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-        LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
-        pgmLock(pVM);
-        rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
-        pgmUnlock(pVM);
-        if (RT_SUCCESS(rc))
-        {
-            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-            return rc;
-        }
-        Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
-        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
-        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-        return VINF_PGM_SYNC_CR3;
+
+    pgmLock(pVM);
+    {   /* Force the shadow pointers to go out of scope after releasing the lock. */
+# if PGM_SHW_TYPE == PGM_TYPE_32BIT
+        const unsigned iPDDst = pvFault >> SHW_PD_SHIFT;
+        PX86PD pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s);
+
+# elif PGM_SHW_TYPE == PGM_TYPE_PAE
+        const unsigned iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
+
+        PX86PDPAE pPDDst;
+#  if PGM_GST_TYPE != PGM_TYPE_PAE
+        X86PDPE PdpeSrc;
+
+        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
+        PdpeSrc.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
+#  endif
+        rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
+        if (rc != VINF_SUCCESS)
+        {
+            pgmUnlock(pVM);
+            AssertRC(rc);
+            return rc;
+        }
+        Assert(pPDDst);
+
+# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+        const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
+        PX86PDPAE pPDDst;
+#  if PGM_GST_TYPE == PGM_TYPE_PROT
+        /* AMD-V nested paging */
+        X86PML4E Pml4eSrc;
+        X86PDPE PdpeSrc;
+        PX86PML4E pPml4eSrc = &Pml4eSrc;
+
+        /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
+        Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
+        PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
+#  endif
+
+        rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
+        if (rc != VINF_SUCCESS)
+        {
+            pgmUnlock(pVM);
+            AssertRC(rc);
+            return rc;
+        }
+        Assert(pPDDst);
+
+# elif PGM_SHW_TYPE == PGM_TYPE_EPT
+        const unsigned iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
+        PEPTPD pPDDst;
+
+        rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
+        if (rc != VINF_SUCCESS)
+        {
+            pgmUnlock(pVM);
+            AssertRC(rc);
+            return rc;
+        }
+        Assert(pPDDst);
+# endif
+
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+        /*
+         * If we successfully correct the write protection fault due to dirty bit
+         * tracking, or this page fault is a genuine one, then return immediately.
+         */
+        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
+        rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
+        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
+        if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
+            ||  rc == VINF_EM_RAW_GUEST_TRAP)
+        {
+            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
+                         = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
+            LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
+            pgmUnlock(pVM);
+            return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
+        }
+
+        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0ePD[iPDSrc]);
+# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+
+        /*
+         * A common case is the not-present error caused by lazy page table syncing.
+         *
+         * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
+         * so we can safely assume that the shadow PT is present when calling SyncPage later.
+         *
+         * On failure, we ASSUME that SyncPT is out of memory or detected some kind
+         * of mapping conflict and defer to SyncCR3 in R3.
+         * (Again, we do NOT support access handlers for non-present guest pages.)
+         *
+         */
+        if (    !(uErr & X86_TRAP_PF_P)  /* not set means page not present instead of page protection violation */
+            &&  !pPDDst->a[iPDDst].n.u1Present
+            &&  PdeSrc.n.u1Present
+           )
+        {
+            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
+            STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
+            LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
+            rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
+            pgmUnlock(pVM);
+            if (RT_SUCCESS(rc))
+            {
+                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
+                return rc;
+            }
+            Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
+            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
+            return VINF_PGM_SYNC_CR3;
+        }
+        pgmUnlock(pVM);
     }
 
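The substance of this first hunk is the locking rule it establishes rather than the code that moved: the guest PDE is fetched up front, the PGM lock is taken, the shadow page-directory pointers are resolved inside a nested scope so they cannot outlive the lock, and every early return now releases the lock before handing its status code back. The standalone sketch below mirrors only that shape; it uses a pthread mutex in place of pgmLock/pgmUnlock, and the names g_shadow_pd, resolve_pd and handle_fault are invented for the illustration, not taken from VirtualBox.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for the per-VM lock; pgmLock/pgmUnlock play this role in PGMAllBth.h. */
    static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Hypothetical shared state that may only be touched while g_lock is held. */
    static int g_shadow_pd[4];

    /* Resolve a "page directory" slot; fails for out-of-range indices. */
    static int resolve_pd(unsigned idx, int **ppSlot)
    {
        if (idx >= sizeof(g_shadow_pd) / sizeof(g_shadow_pd[0]))
            return -ERANGE;
        *ppSlot = &g_shadow_pd[idx];
        return 0;
    }

    /* Mirrors the new Trap0eHandler shape: lock, resolve inside an inner scope,
       and unlock on every exit path before the status code propagates. */
    static int handle_fault(unsigned idx)
    {
        pthread_mutex_lock(&g_lock);
        {   /* Force the pointer to go out of scope after releasing the lock. */
            int *pSlot;
            int rc = resolve_pd(idx, &pSlot);
            if (rc != 0)
            {
                pthread_mutex_unlock(&g_lock);   /* early return: unlock first */
                return rc;
            }
            *pSlot += 1;                         /* safe: the lock is held */
            pthread_mutex_unlock(&g_lock);
        }
        return 0;
    }

    int main(void)
    {
        printf("in range    : rc=%d\n", handle_fault(1));
        printf("out of range: rc=%d\n", handle_fault(9));
        return 0;
    }

Closing the scope immediately after the unlock is the cheap, compiler-enforced half of the discipline: nothing below the brace can dereference a pointer that was only valid while the lock was held.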
@@ r19775:2070 r19776:2077 @@
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
 
+    Assert(PGMIsLockOwner(pVM));
+
     STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
     LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));

@@ r19775:2155 r19776:2164 @@
         pPdeSrc->b.u1Dirty = 1;
 
-        if (pPdeDst->n.u1Present && (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
+        if (    pPdeDst->n.u1Present
+            &&  (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
         {
             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
-
             Assert(pPdeSrc->b.u1Write);
 
+            /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
+             *       fault again and take this path to only invalidate the entry.
+             */
             pPdeDst->n.u1Write = 1;
             pPdeDst->n.u1Accessed = 1;
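With the caller now holding the PGM lock across the whole fault path, CheckPageFault no longer takes the lock itself and instead asserts the precondition, which is what the added Assert(PGMIsLockOwner(pVM)) expresses. One common way to make such a check possible in plain C is to wrap the mutex together with a record of its current owner, roughly as sketched below; owned_lock, lock_is_owner and check_page_fault_like are invented names for this example and are not VirtualBox APIs.

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* A mutex that remembers its current owner so callees can assert ownership,
       similar in spirit to PGMIsLockOwner(). All names here are invented. */
    typedef struct owned_lock
    {
        pthread_mutex_t mtx;
        pthread_t       owner;
        bool            held;
    } owned_lock;

    static void lock_acquire(owned_lock *l)
    {
        pthread_mutex_lock(&l->mtx);
        l->owner = pthread_self();
        l->held  = true;
    }

    static void lock_release(owned_lock *l)
    {
        l->held = false;
        pthread_mutex_unlock(&l->mtx);
    }

    static bool lock_is_owner(owned_lock *l)
    {
        return l->held && pthread_equal(l->owner, pthread_self());
    }

    /* Like CheckPageFault after this changeset: it insists on being entered
       with the lock already held instead of acquiring it on its own. */
    static void check_page_fault_like(owned_lock *l)
    {
        assert(lock_is_owner(l));
        /* ... inspect and update the shared paging state here ... */
    }

    int main(void)
    {
        owned_lock l;
        pthread_mutex_init(&l.mtx, NULL);
        l.held = false;

        lock_acquire(&l);
        check_page_fault_like(&l);   /* fine: the calling thread owns the lock */
        lock_release(&l);

        pthread_mutex_destroy(&l.mtx);
        printf("ownership check passed\n");
        return 0;
    }

The payoff is that a missing pgmLock shows up as an assertion at the call site during testing rather than as a rare, hard-to-trace corruption of shared paging state.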
@@ r19775:2255 r19776:2267 @@
             PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
             PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
-            if (    pPteDst->n.u1Present /** @todo Optimize accessed bit emulation? */
-                &&  (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY))
+            if (pPteDst->n.u1Present)    /** @todo Optimize accessed bit emulation? */
             {
-                LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
+                if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
+                {
+                    LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
 # ifdef VBOX_STRICT
-                PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
-                if (pPage)
-                    AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
-                              ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
+                    PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
+                    if (pPage)
+                        AssertMsg(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage),
+                                  ("Unexpected dirty bit tracking on monitored page %RGv (phys %RGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
 # endif
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
-
-                Assert(pPteSrc->n.u1Write);
-
-                pPteDst->n.u1Write = 1;
-                pPteDst->n.u1Dirty = 1;
-                pPteDst->n.u1Accessed = 1;
-                pPteDst->au32[0] &= ~PGM_PTFLAGS_TRACK_DIRTY;
-                PGM_INVL_PG(GCPtrPage);
-
-                STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
-                return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
+                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
+
+                    Assert(pPteSrc->n.u1Write);
+
+                    /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
+                     *       fault again and take this path to only invalidate the entry.
+                     */
+                    pPteDst->n.u1Write = 1;
+                    pPteDst->n.u1Dirty = 1;
+                    pPteDst->n.u1Accessed = 1;
+                    pPteDst->au32[0] &= ~PGM_PTFLAGS_TRACK_DIRTY;
+                    PGM_INVL_PG(GCPtrPage);
+
+                    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
+                    return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
+                }
+                else
+                if (    pPteDst->n.u1Write == 1
+                    &&  pPteDst->n.u1Accessed == 1)
+                {
+                    /* Stale TLB entry. */
+                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
+                    PGM_INVL_PG(GCPtrPage);
+
+                    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
+                    return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
+                }
             }
         }
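The restructured PTE path is the usual write-protect-until-dirty technique: the shadow PTE stays read-only and carries the software PGM_PTFLAGS_TRACK_DIRTY flag until the guest actually writes, the resulting page fault is where the guest dirty bit gets set and the artificial write protection dropped, and the newly added else branch recognises the case where the shadow PTE has already been fixed up and the CPU merely faulted on a stale TLB entry, so only an invalidation is needed. The model below is a deliberately simplified, self-contained version of that decision; the bit layout and the names PTE_TRACK_DIRTY and on_write_fault are invented and do not match the real page-table or PGM structures.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified page-table entry bits; the layout is invented for the example. */
    #define PTE_P           0x01u   /* present */
    #define PTE_RW          0x02u   /* writable */
    #define PTE_A           0x04u   /* accessed */
    #define PTE_D           0x08u   /* dirty */
    #define PTE_TRACK_DIRTY 0x10u   /* software flag: write-protected only to catch the first write */

    enum fault_result { FR_HANDLED_DIRTY, FR_HANDLED_STALE_TLB, FR_GENUINE_FAULT };

    /* Mimics the decision structure of the dirty-bit path after r19776:
       first the TRACK_DIRTY case, then the "shadow already fixed, TLB stale" case. */
    static enum fault_result on_write_fault(uint32_t *pGstPte, uint32_t *pShwPte)
    {
        if (*pShwPte & PTE_P)
        {
            if (*pShwPte & PTE_TRACK_DIRTY)
            {
                *pGstPte |= PTE_A | PTE_D;            /* record the write in the guest PTE */
                *pShwPte |= PTE_RW | PTE_A | PTE_D;   /* drop the artificial write protection */
                *pShwPte &= ~PTE_TRACK_DIRTY;
                /* a real implementation would invalidate the TLB entry for this page here */
                return FR_HANDLED_DIRTY;
            }
            if ((*pShwPte & PTE_RW) && (*pShwPte & PTE_A))
            {
                /* Shadow PTE is already writable: another path fixed it up and the
                   CPU merely faulted on a stale TLB entry, so only invalidate. */
                return FR_HANDLED_STALE_TLB;
            }
        }
        return FR_GENUINE_FAULT;   /* let the normal page-fault handling take over */
    }

    int main(void)
    {
        uint32_t gst = PTE_P | PTE_RW;            /* guest allows writes, D bit not yet set */
        uint32_t shw = PTE_P | PTE_TRACK_DIRTY;   /* shadow write-protected to trap the first write */

        printf("first write : %d\n", on_write_fault(&gst, &shw));   /* FR_HANDLED_DIRTY */
        printf("second fault: %d\n", on_write_fault(&gst, &shw));   /* FR_HANDLED_STALE_TLB */
        printf("guest dirty : %s\n", (gst & PTE_D) ? "yes" : "no");
        return 0;
    }

The first call on a tracked entry reports the dirty-bit case and sets the guest D bit; a second fault on the same, already fixed-up entry reports the stale-TLB case, which is exactly the distinction the new DirtyPageStale counter measures.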
@@ r19775:3111 r19776:3139 @@
     int rc = VINF_SUCCESS;
 
+    pgmLock(pVM);
+
     /*
      * First check if the shadow pd is present.

@@ r19775:3168 r19776:3198 @@
     if (!pPdeDst->n.u1Present)
     {
-        pgmLock(pVM);
         rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
-        pgmUnlock(pVM);
-        AssertRC(rc);
         if (rc != VINF_SUCCESS)
         {
…
             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
 # endif
+            pgmUnlock(pVM);
+            AssertRC(rc);
             return rc;
         }

@@ r19775:3190 r19776:3219 @@
     {
         GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
-# else
+#  else
     {
         GSTPDE PdeSrc;
…
         PdeSrc.n.u1User = 1;
 
-# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
         Assert(rc != VINF_EM_RAW_GUEST_TRAP);
         if (uErr & X86_TRAP_PF_US)

@@ r19775:3223 r19776:3252 @@
     PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
 # endif
+    pgmUnlock(pVM);
     return rc;
 
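The remaining hunks apply the same locking rule to this later synchronisation path: rather than wrapping only the SyncPT call, the function now takes pgmLock at entry and releases it just before each of its return points, which is why pgmUnlock(pVM) appears immediately ahead of the error and success returns. When a function accumulates several such exits, funnelling them through a single cleanup label is one common way in C to keep the unlock from being forgotten; the sketch below shows that alternative shape with invented names (sync_entry, g_table) and is not how PGMAllBth.h itself is written.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
    static int g_table[8];   /* shared state guarded by g_lock */

    /* Holds the lock for the whole body and funnels every exit through one
       unlock site, instead of pairing lock/unlock around a single call. */
    static int sync_entry(unsigned idx, int value)
    {
        int rc = 0;

        pthread_mutex_lock(&g_lock);

        if (idx >= sizeof(g_table) / sizeof(g_table[0]))
        {
            rc = -1;                 /* invalid index */
            goto out;
        }
        if (g_table[idx] == value)
        {
            rc = 1;                  /* nothing to do */
            goto out;
        }
        g_table[idx] = value;        /* the actual update, done under the lock */

    out:
        pthread_mutex_unlock(&g_lock);
        return rc;
    }

    int main(void)
    {
        printf("update : %d\n", sync_entry(3, 42));   /*  0 */
        printf("repeat : %d\n", sync_entry(3, 42));   /*  1 */
        printf("bad idx: %d\n", sync_entry(99, 7));   /* -1 */
        return 0;
    }

Whether to unlock at every return, as the changeset does, or at a single exit label is largely a matter of style; the invariant that matters is that every path out of the function drops the lock exactly once.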