Changeset 13714 in vbox
Timestamp: Oct 31, 2008, 2:01:43 PM
Location:  trunk
Files:     9 edited
    include/VBox/vmm.h (modified) (1 diff)
    src/VBox/VMM/PDMLdr.cpp (modified) (1 diff)
    src/VBox/VMM/VMM.cpp (modified) (43 diffs)
    src/VBox/VMM/VMMGC/VMMGC.cpp (modified) (2 diffs)
    src/VBox/VMM/VMMGuruMeditation.cpp (modified) (1 diff)
    src/VBox/VMM/VMMInternal.h (modified) (3 diffs)
    src/VBox/VMM/VMMR0/VMMR0.cpp (modified) (10 diffs)
    src/VBox/VMM/VMMTests.cpp (modified) (6 diffs)
    src/VBox/VMM/testcase/tstVMStructGC.cpp (modified) (1 diff)
Legend: unchanged lines carry no prefix; removed lines are prefixed with "-", added lines with "+".
trunk/include/VBox/vmm.h
(r13701 to r13714; 1 hunk, around lines 129-133)

   VMMR3DECL(const char *) VMMR3GetGCAssertMsg1(PVM pVM);
   VMMR3DECL(const char *) VMMR3GetGCAssertMsg2(PVM pVM);
 - VMMR3DECL(int) VMMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue);
 + VMMR3DECL(int) VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue);
   VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher);
   VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM);
trunk/src/VBox/VMM/PDMLdr.cpp
(r13633 to r13714; 1 hunk, around lines 340-347)

       || !strcmp(pszSymbol, "g_RelLogger"))
   {
 -     RTGCPTR GCPtr = 0;
 -     rc = VMMR3GetImportGC(pVM, pszSymbol, &GCPtr);
 +     RTRCPTR RCPtr = 0;
 +     rc = VMMR3GetImportRC(pVM, pszSymbol, &RCPtr);
       if (VBOX_SUCCESS(rc))
 -         *pValue = GCPtr;
 +         *pValue = RCPtr;
   }
   else if ( !strncmp(pszSymbol, "TM", 2)
trunk/src/VBox/VMM/VMM.cpp
(r13703 to r13714; 43 hunks)

The bulk of the changeset lands in this file. The hunks fall into two groups: a restructuring of the ring-3 init code and a file-wide switch of mixed-context names to the R3/R0/RC suffix convention.

Restructuring of the init code:

 * Forward declarations for two new static helpers are added next to vmmR3InitCoreCode(): vmmR3InitStacks() and vmmR3InitLoggers().
 * In VMMR3Init() the inline allocation of the VMM RC stack and of the RC/R0 logger instances is replaced by calls to the new helpers; the comment "Init core code." becomes "Init various sub-components.", the VBOX_SUCCESS(rc) checks on this path become RT_SUCCESS(rc), and a stray AssertRC(rc) after the success block is dropped. (A condensed sketch of the restructured flow follows at the end of this section.)
 * The new vmmR3InitStacks() allocates the EMT stack (plus two guard pages when VBOX_STRICT_VMM_STACK is defined), stores its ring-0 address in CallHostR0JmpBuf.pvSavedStack, derives pbEMTStackRC and pbEMTStackBottomRC with MMHyperR3ToRC(), and sets the hypervisor ESP to the stack bottom. It carries an SMP todo (one stack per vCPU) and a remark that the optional guard-page protection is set up during R3 init completion because of init-order issues.
 * The new vmmR3InitLoggers() allocates the RC logger (LOG_ENABLED), the R0 logger (VBOX_WITH_R0_LOGGING) and the RC release logger (VBOX_WITH_RC_RELEASE_LOGGING); it now returns immediately on allocation failure and additionally records the ring-0 address of the R0 logger: pR0LoggerR0 = MMHyperR3ToR0(pVM, pR0LoggerR3).

Renames applied throughout the rest of the file (old name -> new name):

   pvHCCoreCodeR3 / pvHCCoreCodeR0 / pvGCCoreCode   ->  pvCoreCodeR3 / pvCoreCodeR0 / pvCoreCodeRC
   pbHCStack                                        ->  pbEMTStackR3
   pLoggerHC / pLoggerGC / cbLoggerGC               ->  pRCLoggerR3 / pRCLoggerRC / cbRCLogger
   pRelLoggerHC / pRelLoggerGC / cbRelLoggerGC      ->  pRCRelLoggerR3 / pRCRelLoggerRC / cbRCRelLogger
   pR0Logger (ring-3 access)                        ->  pR0LoggerR3
   pfnR0HostToGuest / pfnGCGuestToHost              ->  pfnHostToGuestR0 / pfnGuestToHostRC
   pfnGCCallTrampoline                              ->  pfnCallTrampolineRC
   pfnCPUMGCResumeGuest / pfnCPUMGCResumeGuestV86   ->  pfnCPUMRCResumeGuest / pfnCPUMRCResumeGuestV86
   iLastGCRc                                        ->  iLastGZRc
   VMMR3GetImportGC(PVM, const char *, PRTGCPTR)    ->  VMMR3GetImportRC(PVM, const char *, PRTRCPTR)

Along with the renames, MMHyperHC2GC() conversions on these members become MMHyperR3ToRC() (core-code relocation, stack guard pages, logger addresses), the local RTGCPTR32 GCPtrLoggerWrapper/GCPtrLoggerFlush variables in VMMR3UpdateLoggers() become RTRCPTR RCPtrLoggerWrapper/RCPtrLoggerFlush, NIL_RTGCPTR becomes NIL_RTRCPTR in VMMR3GetImportRC(), and a few doxygen comments are updated ("Resolve a builtin RC symbol. Called by PDM when loading or relocating RC modules.", parameter pRCPtrValue, "Resumes executing hypervisor code when interrupted by a queue flush or a debug event.").
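To make the restructuring easier to follow, here is a condensed sketch of how the pieces fit together after this changeset. It is paraphrased from the hunks above rather than copied verbatim: error handling, statistics, the strict-stack guard pages and most of the logger setup are left out, and the wrapper function names are stand-ins, not the real symbols.

   /* Sketch only (paraphrased from the diff above, not the verbatim new code). */
   static int vmmR3InitStacksSketch(PVM pVM)
   {
       /* One EMT stack; the same pages back the ring-0 long jumps (VMMR0CallHost). */
       int rc = MMHyperAlloc(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM,
                             (void **)&pVM->vmm.s.pbEMTStackR3);
       if (RT_SUCCESS(rc))
       {
           pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = MMHyperR3ToR0(pVM, pVM->vmm.s.pbEMTStackR3);
           pVM->vmm.s.pbEMTStackRC       = MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3);
           pVM->vmm.s.pbEMTStackBottomRC = pVM->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
           CPUMSetHyperESP(pVM, pVM->vmm.s.pbEMTStackBottomRC); /* ESP starts at the high end; the stack grows down */
       }
       return rc;
   }

   /* Stand-in for the relevant part of VMMR3Init(). */
   static int vmmR3InitSubComponentsSketch(PVM pVM)
   {
       int rc = vmmR3InitCoreCode(pVM);        /* switcher/core code, as before                */
       if (RT_SUCCESS(rc))
       {
           rc = vmmR3InitStacksSketch(pVM);    /* new helper: EMT stack                        */
           if (RT_SUCCESS(rc))
               rc = vmmR3InitLoggers(pVM);     /* new helper: RC, R0 and RC release loggers    */
       }
       return rc;
   }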
trunk/src/VBox/VMM/VMMGC/VMMGC.cpp
(r12989 to r13714; 2 hunks)

   VMMRCDECL(void) VMMGCGuestToHost(PVM pVM, int rc)
   {
 -     pVM->vmm.s.pfnGCGuestToHost(rc);
 +     pVM->vmm.s.pfnGuestToHostRC(rc);
   }

   …

       pVM->vmm.s.u64CallHostArg = uArg;
       pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
 -     pVM->vmm.s.pfnGCGuestToHost(VINF_VMM_CALL_HOST);
 +     pVM->vmm.s.pfnGuestToHostRC(VINF_VMM_CALL_HOST);
       return pVM->vmm.s.rcCallHost;
   }
trunk/src/VBox/VMM/VMMGuruMeditation.cpp
(r13703 to r13714; 1 hunk, around lines 293-300)

       /* core code? */
 -     if (uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode < pVM->vmm.s.cbCoreCode)
 +     if (uEIP - (RTGCUINTPTR)pVM->vmm.s.pvCoreCodeRC < pVM->vmm.s.cbCoreCode)
           pHlp->pfnPrintf(pHlp,
                           "!! EIP is in CoreCode, offset %#x\n",
 -                         uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode);
 +                         uEIP - (RTGCUINTPTR)pVM->vmm.s.pvCoreCodeRC);
       else
       {   /* ask PDM */ /** @todo ask DBGFR3Sym later? */
trunk/src/VBox/VMM/VMMInternal.h
(r13701 to r13714; 3 hunks)

The VMM instance data (struct VMM) is where the renamed members live; the old /** @todo pvHCCoreCodeR3 -> pvCoreCodeR3, pvHCCoreCodeR0 -> pvCoreCodeR0 */ reminder is resolved and removed.

 * A new doxygen group, "World Switcher and Related", brackets the core-code and switcher members.
 * pvHCCoreCodeR3 / pvHCCoreCodeR0 / pvGCCoreCode become pvCoreCodeR3 / pvCoreCodeR0 / pvCoreCodeRC; the RC pointer (like GCPtrApicBase and pGCPadding0 under VBOX_WITH_NMI) changes type from RTGCPTR32 to RTRCPTR.
 * pfnR0HostToGuest / pfnGCGuestToHost become pfnHostToGuestR0 / pfnGuestToHostRC, and pfnGCCallTrampoline, pfnCPUMGCResumeGuest, pfnCPUMGCResumeGuestV86 become pfnCallTrampolineRC, pfnCPUMRCResumeGuest, pfnCPUMRCResumeGuestV86 (all RTGCPTR32 -> RTRCPTR).
 * iLastGCRc ("The last GC return code.") becomes iLastGZRc ("The last RC/R0 return code.").
 * The logger members are gathered into a new "Logging" doxygen group, their size fields change from RTUINT to uint32_t, and a ring-0 pointer to the R0 logger is added; in condensed form (doxygen comments shortened):

 -     RCPTRTYPE(PRTLOGGERRC)    pLoggerGC;       /* RC logger - RC ptr */
 -     RTUINT                    cbLoggerGC;
 -     R3PTRTYPE(PRTLOGGERRC)    pLoggerHC;       /* RC logger - R3 ptr */
 -     R3R0PTRTYPE(PVMMR0LOGGER) pR0Logger;       /* R0 logger */
 -     RCPTRTYPE(PRTLOGGERRC)    pRelLoggerGC;    /* RC release logger - RC ptr (VBOX_WITH_RC_RELEASE_LOGGING) */
 -     RTUINT                    cbRelLoggerGC;
 -     R3PTRTYPE(PRTLOGGERRC)    pRelLoggerHC;    /* RC release logger - R3 ptr */
 +     uint32_t                  cbRCLogger;
 +     RCPTRTYPE(PRTLOGGERRC)    pRCLoggerRC;     /* RC logger - RC ptr */
 +     R3PTRTYPE(PRTLOGGERRC)    pRCLoggerR3;     /* RC logger - R3 ptr */
 +     uint32_t                  cbRCRelLogger;   /* VBOX_WITH_RC_RELEASE_LOGGING */
 +     RCPTRTYPE(PRTLOGGERRC)    pRCRelLoggerRC;  /* RC release logger - RC ptr */
 +     R3PTRTYPE(PRTLOGGERRC)    pRCRelLoggerR3;  /* RC release logger - R3 ptr */
 +     R3PTRTYPE(PVMMR0LOGGER)   pR0LoggerR3;     /* R0 logger - R3 ptr */
 +     R0PTRTYPE(PVMMR0LOGGER)   pR0LoggerR0;     /* R0 logger - R0 ptr, new */
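The new names follow the suffix convention used across the changeset: R3, R0 and RC state the context in which the address is valid, and an object that must be reachable from several contexts gets one member per context. A minimal illustration of how such a pair is kept in sync, modelled on the logger relocation code in the VMM.cpp hunks above (the wrapper function name is hypothetical):

   /* Illustration only: R3/RC address pair for one hyper-heap object (the wrapper is hypothetical). */
   static void vmmSketchSyncLoggerAddresses(PVM pVM)
   {
       if (pVM->vmm.s.pRCLoggerR3)                   /* ring-3 address of the RC logger          */
           pVM->vmm.s.pRCLoggerRC =                  /* raw-mode context address, derived from it */
               MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
   }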
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
(r13701 to r13714; 10 hunks)

The ring-0 side of the same renames. The EMT R0 logger is now registered through the new ring-0 pointer, and the switcher entry point and shared return-code field use their new names:

       /*
        * Register the EMT R0 logger instance.
        */
 -     PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
 +     PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0LoggerR0;
       if (pR0Logger)

   …

       STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
       register int rc;
 -     pVM->vmm.s.iLastGCRc = rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
 +     pVM->vmm.s.iLastGZRc = rc = pVM->vmm.s.pfnHostToGuestR0(pVM);

The remaining hunks apply the same two renames (pfnR0HostToGuest -> pfnHostToGuestR0, iLastGCRc -> iLastGZRc) to the raw-mode and HWACC fast-call paths, the interrupt, NOP and default cases of the fast entry point, its doxygen comment ("The return code is stored in pVM->vmm.s.iLastGZRc.") and the remaining switcher call sites.
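For context on the iLastGZRc rename: on the fast-call path the real status is handed back through this shared field rather than through the return value of SUPCallVMMR0Fast(), which is why the ring-3 code in the VMM.cpp and VMMTests.cpp hunks reads it straight after the call. A minimal sketch of that ring-3 pattern, paraphrased from this changeset (the surrounding loop and statistics are omitted):

   int rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN);  /* the support call's own status             */
   if (RT_LIKELY(rc == VINF_SUCCESS))
       rc = pVM->vmm.s.iLastGZRc;                            /* the status ring-0 stored (was iLastGCRc)  */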
trunk/src/VBox/VMM/VMMTests.cpp
(r13698 to r13714; 6 hunks)

All six hunks are mechanical renames in the test and benchmark helpers: every
CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline) becomes
CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnCallTrampolineRC), the matching
Log(("trampoline=%x\n", ...)) statement is updated, and every
"rc = pVM->vmm.s.iLastGCRc" after a successful SUPCallVMMR0Fast() becomes
"rc = pVM->vmm.s.iLastGZRc".
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
(r13701 to r13714; 1 hunk, around line 826)

   GEN_CHECK_OFF(VMM, cbCoreCode);
   GEN_CHECK_OFF(VMM, HCPhysCoreCode);
 - GEN_CHECK_OFF(VMM, pvHCCoreCodeR3);
 - GEN_CHECK_OFF(VMM, pvHCCoreCodeR0);
 - GEN_CHECK_OFF(VMM, pvGCCoreCode);
 + GEN_CHECK_OFF(VMM, pvCoreCodeR3);
 + GEN_CHECK_OFF(VMM, pvCoreCodeR0);
 + GEN_CHECK_OFF(VMM, pvCoreCodeRC);
   GEN_CHECK_OFF(VMM, enmSwitcher);
   GEN_CHECK_OFF(VMM, aoffSwitchers);
   GEN_CHECK_OFF(VMM, aoffSwitchers[1]);
 - GEN_CHECK_OFF(VMM, pfnR0HostToGuest);
 - GEN_CHECK_OFF(VMM, pfnGCGuestToHost);
 - GEN_CHECK_OFF(VMM, pfnGCCallTrampoline);
 - GEN_CHECK_OFF(VMM, pfnCPUMGCResumeGuest);
 - GEN_CHECK_OFF(VMM, pfnCPUMGCResumeGuestV86);
 - GEN_CHECK_OFF(VMM, iLastGCRc);
 + GEN_CHECK_OFF(VMM, pfnHostToGuestR0);
 + GEN_CHECK_OFF(VMM, pfnGuestToHostRC);
 + GEN_CHECK_OFF(VMM, pfnCallTrampolineRC);
 + GEN_CHECK_OFF(VMM, pfnCPUMRCResumeGuest);
 + GEN_CHECK_OFF(VMM, pfnCPUMRCResumeGuestV86);
 + GEN_CHECK_OFF(VMM, iLastGZRc);
   GEN_CHECK_OFF(VMM, pbEMTStackR3);
   GEN_CHECK_OFF(VMM, pbEMTStackRC);
   GEN_CHECK_OFF(VMM, pbEMTStackBottomRC);
 - GEN_CHECK_OFF(VMM, pLoggerGC);
 - GEN_CHECK_OFF(VMM, pLoggerHC);
 - GEN_CHECK_OFF(VMM, cbLoggerGC);
 + GEN_CHECK_OFF(VMM, pRCLoggerRC);
 + GEN_CHECK_OFF(VMM, pRCLoggerR3);
 + GEN_CHECK_OFF(VMM, pR0LoggerR0);
 + GEN_CHECK_OFF(VMM, pR0LoggerR3);
 + GEN_CHECK_OFF(VMM, cbRCLogger);
   GEN_CHECK_OFF(VMM, CritSectVMLock);
   GEN_CHECK_OFF(VMM, pYieldTimer);