VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMMTests.cpp@ 76553

Last change on this file was 76553, checked in by vboxsync, 5 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 37.0 KB
Rev  Line 
[23]1/* $Id: VMMTests.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
[1]2/** @file
[1313]3 * VMM - The Virtual Machine Monitor Core, Tests.
[1]4 */
5
6/*
[76553]7 * Copyright (C) 2006-2019 Oracle Corporation
[1]8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
[5999]12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
[1]16 */
17
[1313]18//#define NO_SUPCALLR0VMM
[1]19
[57358]20
21/*********************************************************************************************************************************
22* Header Files *
23*********************************************************************************************************************************/
[1]24#define LOG_GROUP LOG_GROUP_VMM
[29250]25#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
[35346]26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/cpum.h>
[26152]29#include <VBox/dbg.h>
[45618]30#include <VBox/vmm/hm.h>
[35346]31#include <VBox/vmm/mm.h>
32#include <VBox/vmm/trpm.h>
33#include <VBox/vmm/selm.h>
[1]34#include "VMMInternal.h"
[35346]35#include <VBox/vmm/vm.h>
[1]36#include <VBox/err.h>
37#include <VBox/param.h>
[1313]38
[1]39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/time.h>
42#include <iprt/stream.h>
43#include <iprt/string.h>
[37955]44#include <iprt/x86.h>
[1]45
[63429]46
47#ifdef VBOX_WITH_RAW_MODE
48
[41985]49static void vmmR3TestClearStack(PVMCPU pVCpu)
50{
51 /* We leave the first 64 bytes of the stack alone because the strict
52 ring-0 long jump code uses it. */
53 memset(pVCpu->vmm.s.pbEMTStackR3 + 64, 0xaa, VMM_STACK_SIZE - 64);
54}
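
/*
 * Editor's note: a minimal, self-contained sketch (not part of the original
 * file, wrapped in #if 0 so it cannot affect the build) of why the stack is
 * filled with 0xAA above: the fill pattern makes it possible to estimate peak
 * stack usage later by scanning for the first byte that no longer holds the
 * pattern.  The buffer below is a hypothetical stand-in for pbEMTStackR3.
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Returns how many bytes at the low end of a downward-growing stack still
   hold the 0xAA fill pattern, i.e. were never written to. */
static size_t exampleStackBytesUntouched(const uint8_t *pbStack, size_t cbStack)
{
    size_t off = 0;
    while (off < cbStack && pbStack[off] == 0xaa)
        off++;
    return off;
}

int main(void)
{
    uint8_t abStack[256];
    memset(abStack, 0xaa, sizeof(abStack));
    abStack[200] = 0; /* simulate the deepest byte the code ever touched */
    printf("%zu bytes never touched\n", exampleStackBytesUntouched(abStack, sizeof(abStack)));
    return 0;
}
#endif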
[1]55
[41985]56
[49164]57static int vmmR3ReportMsrRange(PVM pVM, uint32_t uMsr, uint64_t cMsrs, PRTSTREAM pReportStrm, uint32_t *pcMsrsFound)
58{
59 /*
60 * Preps.
61 */
62 RTRCPTR RCPtrEP;
[56051]63 int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCTestReadMsrs", &RCPtrEP);
[49164]64 AssertMsgRCReturn(rc, ("Failed to resolve VMMRC.rc::VMMRCTestReadMsrs(), rc=%Rrc\n", rc), rc);
65
66 uint32_t const cMsrsPerCall = 16384;
67 uint32_t cbResults = cMsrsPerCall * sizeof(VMMTESTMSRENTRY);
68 PVMMTESTMSRENTRY paResults;
69 rc = MMHyperAlloc(pVM, cbResults, 0, MM_TAG_VMM, (void **)&paResults);
70 AssertMsgRCReturn(rc, ("Error allocating %#x bytes off the hyper heap: %Rrc\n", cbResults, rc), rc);
71 /*
72 * The loop.
73 */
74 RTRCPTR RCPtrResults = MMHyperR3ToRC(pVM, paResults);
75 uint32_t cMsrsFound = 0;
76 uint32_t uLastMsr = uMsr;
77 uint64_t uNsTsStart = RTTimeNanoTS();
78
79 for (;;)
80 {
81 if ( pReportStrm
82 && uMsr - uLastMsr > _64K
83 && (uMsr & (_4M - 1)) == 0)
84 {
85 if (uMsr - uLastMsr < 16U*_1M)
86 RTStrmFlush(pReportStrm);
87 RTPrintf("... %#010x [%u ns/msr] ...\n", uMsr, (RTTimeNanoTS() - uNsTsStart) / uMsr);
88 }
89
90 /*RT_BZERO(paResults, cbResults);*/
91 uint32_t const cBatch = RT_MIN(cMsrsPerCall, cMsrs);
92 rc = VMMR3CallRC(pVM, RCPtrEP, 4, pVM->pVMRC, uMsr, cBatch, RCPtrResults);
93 if (RT_FAILURE(rc))
94 {
95 RTPrintf("VMM: VMMR3CallRC failed rc=%Rrc, uMsr=%#x\n", rc, uMsr);
96 break;
97 }
98
99 for (uint32_t i = 0; i < cBatch; i++)
100 if (paResults[i].uMsr != UINT64_MAX)
101 {
102 if (paResults[i].uValue == 0)
103 {
104 if (pReportStrm)
[49367]105 RTStrmPrintf(pReportStrm,
106 " MVO(%#010llx, \"MSR\", UINT64_C(%#018llx)),\n", paResults[i].uMsr, paResults[i].uValue);
[49164]107 RTPrintf("%#010llx = 0\n", paResults[i].uMsr);
108 }
109 else
110 {
111 if (pReportStrm)
[49367]112 RTStrmPrintf(pReportStrm,
113 " MVO(%#010llx, \"MSR\", UINT64_C(%#018llx)),\n", paResults[i].uMsr, paResults[i].uValue);
[49164]114 RTPrintf("%#010llx = %#010x`%08x\n", paResults[i].uMsr,
[66848]115 RT_HI_U32(paResults[i].uValue), RT_LO_U32(paResults[i].uValue));
[49164]116 }
117 cMsrsFound++;
118 uLastMsr = paResults[i].uMsr;
119 }
120
121 /* Advance. */
122 if (cMsrs <= cMsrsPerCall)
123 break;
124 cMsrs -= cMsrsPerCall;
125 uMsr += cMsrsPerCall;
126 }
127
128 *pcMsrsFound += cMsrsFound;
129 MMHyperFree(pVM, paResults);
130 return rc;
131}
132
133
[1]134/**
[49164]135 * Produces a quick report of MSRs.
136 *
137 * @returns VBox status code.
[58122]138 * @param pVM The cross context VM structure.
[49367]139 * @param pReportStrm Pointer to the report output stream. Optional.
140 * @param fWithCpuId Whether CPUID should be included.
[49164]141 */
[49367]142static int vmmR3DoMsrQuickReport(PVM pVM, PRTSTREAM pReportStrm, bool fWithCpuId)
[49164]143{
144 uint64_t uTsStart = RTTimeNanoTS();
145 RTPrintf("=== MSR Quick Report Start ===\n");
146 RTStrmFlush(g_pStdOut);
[49367]147 if (fWithCpuId)
148 {
149 DBGFR3InfoStdErr(pVM->pUVM, "cpuid", "verbose");
150 RTPrintf("\n");
151 }
152 if (pReportStrm)
153 RTStrmPrintf(pReportStrm, "\n\n{\n");
[49372]154
155 static struct { uint32_t uFirst, cMsrs; } const s_aRanges[] =
156 {
157 { 0x00000000, 0x00042000 },
158 { 0x10000000, 0x00001000 },
159 { 0x20000000, 0x00001000 },
160 { 0x40000000, 0x00012000 },
161 { 0x80000000, 0x00012000 },
[49374]162// Need 0xc0000000..0xc001106f (at least), but trouble on solaris w/ 10h and 0fh family cpus:
163// { 0xc0000000, 0x00022000 },
164 { 0xc0000000, 0x00010000 },
[49375]165 { 0xc0010000, 0x00001040 },
[49383]166 { 0xc0011040, 0x00004040 }, /* should cause trouble... */
[49372]167 };
[49164]168 uint32_t cMsrsFound = 0;
[49372]169 int rc = VINF_SUCCESS;
170 for (unsigned i = 0; i < RT_ELEMENTS(s_aRanges) && RT_SUCCESS(rc); i++)
[49367]171 {
[49383]172//if (i >= 3)
173//{
174//RTStrmFlush(g_pStdOut);
175//RTThreadSleep(40);
176//}
[49372]177 rc = vmmR3ReportMsrRange(pVM, s_aRanges[i].uFirst, s_aRanges[i].cMsrs, pReportStrm, &cMsrsFound);
178 }
179
[49367]180 if (pReportStrm)
181 RTStrmPrintf(pReportStrm, "}; /* %u (%#x) MSRs; rc=%Rrc */\n", cMsrsFound, cMsrsFound, rc);
[49164]182 RTPrintf("Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
183 RTPrintf("=== MSR Quick Report End (rc=%Rrc, %'llu ns) ===\n", rc, RTTimeNanoTS() - uTsStart);
184 return rc;
185}
186
187
188/**
[1]189 * Performs a testcase.
190 *
191 * @returns return value from the test.
[58122]192 * @param pVM The cross context VM structure.
[1]193 * @param enmTestcase The testcase operation to perform.
194 * @param uVariation The testcase variation id.
195 */
[56286]196static int vmmR3DoGCTest(PVM pVM, VMMRCOPERATION enmTestcase, unsigned uVariation)
[1]197{
[18927]198 PVMCPU pVCpu = &pVM->aCpus[0];
199
[13813]200 RTRCPTR RCPtrEP;
[56286]201 int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
[13816]202 if (RT_FAILURE(rc))
[1]203 return rc;
204
[47689]205 Log(("vmmR3DoGCTest: %d %#x\n", enmTestcase, uVariation));
[41985]206 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
207 vmmR3TestClearStack(pVCpu);
[18927]208 CPUMPushHyper(pVCpu, uVariation);
209 CPUMPushHyper(pVCpu, enmTestcase);
210 CPUMPushHyper(pVCpu, pVM->pVMRC);
211 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
212 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
213 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
[20864]214 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
[47689]215
[63429]216# if 1
[47689]217 /* flush the raw-mode logs. */
[63429]218# ifdef LOG_ENABLED
[47689]219 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
220 if ( pLogger
221 && pLogger->offScratch > 0)
222 RTLogFlushRC(NULL, pLogger);
[63429]223# endif
224# ifdef VBOX_WITH_RC_RELEASE_LOGGING
[47689]225 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
226 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
[55980]227 RTLogFlushRC(RTLogRelGetDefaultInstance(), pRelLogger);
[63429]228# endif
[47689]229# endif
230
231 Log(("vmmR3DoGCTest: rc=%Rrc iLastGZRc=%Rrc\n", rc, pVCpu->vmm.s.iLastGZRc));
[10723]232 if (RT_LIKELY(rc == VINF_SUCCESS))
[19462]233 rc = pVCpu->vmm.s.iLastGZRc;
[10723]234 return rc;
[1]235}
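
/*
 * Editor's note: the CPUMPushHyper() sequence above builds a tiny cdecl-style
 * frame on the hypervisor stack: the arguments are pushed right-to-left,
 * followed by the frame size and finally the address to call.  The standalone
 * sketch below (hypothetical types and names, no VBox APIs, wrapped in #if 0)
 * mimics that layout on a plain downward-growing 32-bit stack.
 */
#if 0 /* illustrative only */
#include <stdint.h>

typedef struct ExampleHyperStack
{
    uint32_t *puTop;    /* current top of a downward-growing stack */
} ExampleHyperStack;

static void examplePush(ExampleHyperStack *pStack, uint32_t uValue)
{
    *--pStack->puTop = uValue;
}

static void exampleSetupCall(ExampleHyperStack *pStack, uint32_t uEntryPoint,
                             uint32_t uArg0, uint32_t uArg1, uint32_t uArg2)
{
    examplePush(pStack, uArg2);                             /* last argument first */
    examplePush(pStack, uArg1);
    examplePush(pStack, uArg0);
    examplePush(pStack, (uint32_t)(3 * sizeof(uint32_t)));  /* stack frame size */
    examplePush(pStack, uEntryPoint);                       /* what to call */
}

int main(void)
{
    uint32_t auStack[16];
    ExampleHyperStack Stack = { &auStack[16] };             /* top = end of the array */
    exampleSetupCall(&Stack, /*uEntryPoint=*/0x1000, 1, 2, 3);
    return 0;
}
#endif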
236
237
238/**
239 * Performs a trap test.
240 *
241 * @returns Return value from the trap test.
[58122]242 * @param pVM The cross context VM structure.
[1]243 * @param u8Trap The trap number to test.
244 * @param uVariation The testcase variation.
245 * @param rcExpect The expected result.
246 * @param u32Eax The expected eax value.
247 * @param pszFaultEIP The fault address. Pass NULL if this isn't available or doesn't apply.
248 * @param pszDesc The test description.
249 */
250static int vmmR3DoTrapTest(PVM pVM, uint8_t u8Trap, unsigned uVariation, int rcExpect, uint32_t u32Eax, const char *pszFaultEIP, const char *pszDesc)
251{
[18927]252 PVMCPU pVCpu = &pVM->aCpus[0];
253
[1]254 RTPrintf("VMM: testing 0%x / %d - %s\n", u8Trap, uVariation, pszDesc);
255
[13813]256 RTRCPTR RCPtrEP;
[56286]257 int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
[13816]258 if (RT_FAILURE(rc))
[1]259 return rc;
260
[41985]261 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
262 vmmR3TestClearStack(pVCpu);
[18927]263 CPUMPushHyper(pVCpu, uVariation);
[56286]264 CPUMPushHyper(pVCpu, u8Trap + VMMRC_DO_TESTCASE_TRAP_FIRST);
[18927]265 CPUMPushHyper(pVCpu, pVM->pVMRC);
266 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
267 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
268 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
[20864]269 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
[10723]270 if (RT_LIKELY(rc == VINF_SUCCESS))
[19462]271 rc = pVCpu->vmm.s.iLastGZRc;
[1]272 bool fDump = false;
273 if (rc != rcExpect)
274 {
[13818]275 RTPrintf("VMM: FAILURE - rc=%Rrc expected %Rrc\n", rc, rcExpect);
[1]276 if (rc != VERR_NOT_IMPLEMENTED)
277 fDump = true;
278 }
[988]279 else if ( rcExpect != VINF_SUCCESS
280 && u8Trap != 8 /* double fault doesn't dare set TrapNo. */
[1]281 && u8Trap != 3 /* guest only, we're not in guest. */
282 && u8Trap != 1 /* guest only, we're not in guest. */
[19015]283 && u8Trap != TRPMGetTrapNo(pVCpu))
[1]284 {
[19015]285 RTPrintf("VMM: FAILURE - Trap %#x expected %#x\n", TRPMGetTrapNo(pVCpu), u8Trap);
[1]286 fDump = true;
287 }
288 else if (pszFaultEIP)
289 {
[13813]290 RTRCPTR RCPtrFault;
[56051]291 int rc2 = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, pszFaultEIP, &RCPtrFault);
[13816]292 if (RT_FAILURE(rc2))
[13818]293 RTPrintf("VMM: FAILURE - Failed to resolve symbol '%s', %Rrc!\n", pszFaultEIP, rc2);
[18927]294 else if (RCPtrFault != CPUMGetHyperEIP(pVCpu))
[1]295 {
[18927]296 RTPrintf("VMM: FAILURE - EIP=%08RX32 expected %RRv (%s)\n", CPUMGetHyperEIP(pVCpu), RCPtrFault, pszFaultEIP);
[1]297 fDump = true;
298 }
299 }
[988]300 else if (rcExpect != VINF_SUCCESS)
[1]301 {
[18927]302 if (CPUMGetHyperSS(pVCpu) != SELMGetHyperDS(pVM))
303 RTPrintf("VMM: FAILURE - ss=%x expected %x\n", CPUMGetHyperSS(pVCpu), SELMGetHyperDS(pVM));
304 if (CPUMGetHyperES(pVCpu) != SELMGetHyperDS(pVM))
305 RTPrintf("VMM: FAILURE - es=%x expected %x\n", CPUMGetHyperES(pVCpu), SELMGetHyperDS(pVM));
306 if (CPUMGetHyperDS(pVCpu) != SELMGetHyperDS(pVM))
307 RTPrintf("VMM: FAILURE - ds=%x expected %x\n", CPUMGetHyperDS(pVCpu), SELMGetHyperDS(pVM));
308 if (CPUMGetHyperFS(pVCpu) != SELMGetHyperDS(pVM))
309 RTPrintf("VMM: FAILURE - fs=%x expected %x\n", CPUMGetHyperFS(pVCpu), SELMGetHyperDS(pVM));
310 if (CPUMGetHyperGS(pVCpu) != SELMGetHyperDS(pVM))
311 RTPrintf("VMM: FAILURE - gs=%x expected %x\n", CPUMGetHyperGS(pVCpu), SELMGetHyperDS(pVM));
312 if (CPUMGetHyperEDI(pVCpu) != 0x01234567)
313 RTPrintf("VMM: FAILURE - edi=%x expected %x\n", CPUMGetHyperEDI(pVCpu), 0x01234567);
314 if (CPUMGetHyperESI(pVCpu) != 0x42000042)
315 RTPrintf("VMM: FAILURE - esi=%x expected %x\n", CPUMGetHyperESI(pVCpu), 0x42000042);
316 if (CPUMGetHyperEBP(pVCpu) != 0xffeeddcc)
317 RTPrintf("VMM: FAILURE - ebp=%x expected %x\n", CPUMGetHyperEBP(pVCpu), 0xffeeddcc);
318 if (CPUMGetHyperEBX(pVCpu) != 0x89abcdef)
319 RTPrintf("VMM: FAILURE - ebx=%x expected %x\n", CPUMGetHyperEBX(pVCpu), 0x89abcdef);
320 if (CPUMGetHyperECX(pVCpu) != 0xffffaaaa)
321 RTPrintf("VMM: FAILURE - ecx=%x expected %x\n", CPUMGetHyperECX(pVCpu), 0xffffaaaa);
322 if (CPUMGetHyperEDX(pVCpu) != 0x77778888)
323 RTPrintf("VMM: FAILURE - edx=%x expected %x\n", CPUMGetHyperEDX(pVCpu), 0x77778888);
324 if (CPUMGetHyperEAX(pVCpu) != u32Eax)
325 RTPrintf("VMM: FAILURE - eax=%x expected %x\n", CPUMGetHyperEAX(pVCpu), u32Eax);
[1]326 }
327 if (fDump)
[18927]328 VMMR3FatalDump(pVM, pVCpu, rc);
[1]329 return rc;
330}
331
[43864]332#endif /* VBOX_WITH_RAW_MODE */
[1]333
[43864]334
[1]335/** Executes the raw-mode VMM testcases: traps, hardware breakpoints, interrupt masking/forwarding and switcher profiling. */
336VMMR3DECL(int) VMMDoTest(PVM pVM)
337{
[43864]338 int rc = VINF_SUCCESS;
339
340#ifdef VBOX_WITH_RAW_MODE
[18927]341 PVMCPU pVCpu = &pVM->aCpus[0];
[44399]342 PUVM pUVM = pVM->pUVM;
[18927]343
[43864]344# ifdef NO_SUPCALLR0VMM
[1]345 RTPrintf("NO_SUPCALLR0VMM\n");
[43864]346 return rc;
347# endif
[1]348
349 /*
[56286]350 * Setup stack for calling VMMRCEntry().
[1]351 */
[13813]352 RTRCPTR RCPtrEP;
[56286]353 rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
[13816]354 if (RT_SUCCESS(rc))
[1]355 {
[56286]356 RTPrintf("VMM: VMMRCEntry=%RRv\n", RCPtrEP);
[914]357
[1]358 /*
359 * Test various crashes which we must be able to recover from.
360 */
361 vmmR3DoTrapTest(pVM, 0x3, 0, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3");
362 vmmR3DoTrapTest(pVM, 0x3, 1, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3 WP");
363
[47843]364# if 0//defined(DEBUG_bird) /* guess most people would like to skip these since they write to com1. */
[1]365 vmmR3DoTrapTest(pVM, 0x8, 0, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG]");
366 SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
367 bool f;
368 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
[43864]369# if !defined(DEBUG_bird)
[13816]370 if (RT_SUCCESS(rc) && f)
[43864]371# endif
[1]372 {
[56286]373 /* see triple fault warnings in SELM and VMMRC.cpp. */
[1]374 vmmR3DoTrapTest(pVM, 0x8, 1, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG] WP");
375 SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
376 }
[43864]377# endif
[1]378
379 vmmR3DoTrapTest(pVM, 0xd, 0, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP");
[63560]380 /// @todo find a better \#GP case; on Intel, ltr will \#PF (busy update?) and not \#GP.
[1]381 //vmmR3DoTrapTest(pVM, 0xd, 1, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP WP");
382
383 vmmR3DoTrapTest(pVM, 0xe, 0, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL)");
384 vmmR3DoTrapTest(pVM, 0xe, 1, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL) WP");
[988]385 vmmR3DoTrapTest(pVM, 0xe, 2, VINF_SUCCESS, 0x00000000, NULL, "#PF w/Tmp Handler");
[41985]386 /* This test is no longer relevant as fs and gs are loaded with NULL
387 selectors and we will always return to HC if a #GP occurs while
388 returning to guest code.
[988]389 vmmR3DoTrapTest(pVM, 0xe, 4, VINF_SUCCESS, 0x00000000, NULL, "#PF w/Tmp Handler and bad fs");
[41985]390 */
[1]391
392 /*
393 * Set a debug register and perform a context switch.
394 */
[56286]395 rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_NOP, 0);
[1]396 if (rc != VINF_SUCCESS)
397 {
[13818]398 RTPrintf("VMM: Nop test failed, rc=%Rrc not VINF_SUCCESS\n", rc);
[47689]399 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
[1]400 }
401
402 /* a harmless breakpoint */
403 RTPrintf("VMM: testing hardware bp at 0x10000 (not hit)\n");
404 DBGFADDRESS Addr;
[44399]405 DBGFR3AddrFromFlat(pUVM, &Addr, 0x10000);
[1]406 RTUINT iBp0;
[44399]407 rc = DBGFR3BpSetReg(pUVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp0);
[1]408 AssertReleaseRC(rc);
[56286]409 rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_NOP, 0);
[1]410 if (rc != VINF_SUCCESS)
411 {
[13818]412 RTPrintf("VMM: DR0=0x10000 test failed with rc=%Rrc!\n", rc);
[47689]413 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
[1]414 }
415
[56286]416 /* a bad one at VMMRCEntry */
417 RTPrintf("VMM: testing hardware bp at VMMRCEntry (hit)\n");
[44399]418 DBGFR3AddrFromFlat(pUVM, &Addr, RCPtrEP);
[1]419 RTUINT iBp1;
[44399]420 rc = DBGFR3BpSetReg(pUVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp1);
[1]421 AssertReleaseRC(rc);
[56286]422 rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_NOP, 0);
[1]423 if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
424 {
[56286]425 RTPrintf("VMM: DR1=VMMRCEntry test failed with rc=%Rrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
[47689]426 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
[1]427 }
428
429 /* resume the breakpoint */
430 RTPrintf("VMM: resuming hyper after breakpoint\n");
[18927]431 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
432 rc = VMMR3ResumeHyper(pVM, pVCpu);
[1]433 if (rc != VINF_SUCCESS)
434 {
[41985]435 RTPrintf("VMM: failed to resume on hyper breakpoint, rc=%Rrc = KNOWN BUG\n", rc); /** @todo fix VMMR3ResumeHyper */
[47689]436 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
[1]437 }
438
439 /* engage the breakpoint again and try single stepping. */
[56286]440 RTPrintf("VMM: testing hardware bp at VMMRCEntry + stepping\n");
441 rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_NOP, 0);
[1]442 if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
443 {
[56286]444 RTPrintf("VMM: DR1=VMMRCEntry test failed with rc=%Rrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
[47689]445 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
[1]446 }
447
[18927]448 RTGCUINTREG OldPc = CPUMGetHyperEIP(pVCpu);
[1]449 RTPrintf("%RGr=>", OldPc);
450 unsigned i;
451 for (i = 0; i < 8; i++)
452 {
[18927]453 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
454 rc = VMMR3ResumeHyper(pVM, pVCpu);
[1]455 if (rc != VINF_EM_DBG_HYPER_STEPPED)
456 {
[13818]457 RTPrintf("\nVMM: failed to step on hyper breakpoint, rc=%Rrc\n", rc);
[47689]458 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
[1]459 }
[18927]460 RTGCUINTREG Pc = CPUMGetHyperEIP(pVCpu);
[1]461 RTPrintf("%RGr=>", Pc);
462 if (Pc == OldPc)
463 {
464 RTPrintf("\nVMM: step failed, PC: %RGr -> %RGr\n", OldPc, Pc);
465 return VERR_GENERAL_FAILURE;
466 }
467 OldPc = Pc;
468 }
469 RTPrintf("ok\n");
470
471 /* done, clear it */
[44399]472 if ( RT_FAILURE(DBGFR3BpClear(pUVM, iBp0))
473 || RT_FAILURE(DBGFR3BpClear(pUVM, iBp1)))
[1]474 {
475 RTPrintf("VMM: Failed to clear breakpoints!\n");
476 return VERR_GENERAL_FAILURE;
477 }
[56286]478 rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_NOP, 0);
[1]479 if (rc != VINF_SUCCESS)
480 {
[13818]481 RTPrintf("VMM: NOP failed, rc=%Rrc\n", rc);
[47689]482 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
[1]483 }
484
485 /*
[47843]486 * Interrupt masking. Failure may indicate NMI watchdog activity.
[847]487 */
488 RTPrintf("VMM: interrupt masking...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
489 for (i = 0; i < 10000; i++)
490 {
491 uint64_t StartTick = ASMReadTSC();
[56286]492 rc = vmmR3DoGCTest(pVM, VMMRC_DO_TESTCASE_INTERRUPT_MASKING, 0);
[847]493 if (rc != VINF_SUCCESS)
494 {
[13818]495 RTPrintf("VMM: Interrupt masking failed: rc=%Rrc\n", rc);
[47689]496 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
[847]497 }
498 uint64_t Ticks = ASMReadTSC() - StartTick;
[54308]499 if (Ticks < (SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) / 10000))
500 RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) / 10000);
[847]501 }
502
503 /*
[1]504 * Interrupt forwarding.
505 */
[41985]506 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
[18927]507 CPUMPushHyper(pVCpu, 0);
[56286]508 CPUMPushHyper(pVCpu, VMMRC_DO_TESTCASE_HYPER_INTERRUPT);
[18927]509 CPUMPushHyper(pVCpu, pVM->pVMRC);
510 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
511 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
[13714]512 Log(("trampoline=%x\n", pVM->vmm.s.pfnCallTrampolineRC));
[1]513
514 /*
515 * Switch and do da thing.
516 */
[419]517 RTPrintf("VMM: interrupt forwarding...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
[1]518 i = 0;
519 uint64_t tsBegin = RTTimeNanoTS();
520 uint64_t TickStart = ASMReadTSC();
[18927]521 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
[1]522 do
523 {
[20864]524 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
[10723]525 if (RT_LIKELY(rc == VINF_SUCCESS))
[19462]526 rc = pVCpu->vmm.s.iLastGZRc;
[13816]527 if (RT_FAILURE(rc))
[1]528 {
[13818]529 Log(("VMM: GC returned fatal %Rra in iteration %d\n", rc, i));
[18927]530 VMMR3FatalDump(pVM, pVCpu, rc);
[1]531 return rc;
532 }
533 i++;
534 if (!(i % 32))
535 Log(("VMM: iteration %d, esi=%08x edi=%08x ebx=%08x\n",
[18927]536 i, CPUMGetHyperESI(pVCpu), CPUMGetHyperEDI(pVCpu), CPUMGetHyperEBX(pVCpu)));
[421]537 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
[1]538 uint64_t TickEnd = ASMReadTSC();
539 uint64_t tsEnd = RTTimeNanoTS();
540
541 uint64_t Elapsed = tsEnd - tsBegin;
542 uint64_t PerIteration = Elapsed / (uint64_t)i;
543 uint64_t cTicksElapsed = TickEnd - TickStart;
544 uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;
545
546 RTPrintf("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
547 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration);
548 Log(("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
549 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration));
550
551 /*
552 * These forced actions are not necessary for the test and trigger breakpoints too.
553 */
[19141]554 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
555 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
[1]556
557 /*
558 * Profile switching.
559 */
560 RTPrintf("VMM: profiling switcher...\n");
561 Log(("VMM: profiling switcher...\n"));
[62647]562 uint64_t TickMin = UINT64_MAX;
[1]563 tsBegin = RTTimeNanoTS();
564 TickStart = ASMReadTSC();
[18927]565 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
[1]566 for (i = 0; i < 1000000; i++)
567 {
[41985]568 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
[18927]569 CPUMPushHyper(pVCpu, 0);
[56286]570 CPUMPushHyper(pVCpu, VMMRC_DO_TESTCASE_NOP);
[18927]571 CPUMPushHyper(pVCpu, pVM->pVMRC);
572 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
573 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
[1]574
575 uint64_t TickThisStart = ASMReadTSC();
[20864]576 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
[10723]577 if (RT_LIKELY(rc == VINF_SUCCESS))
[19462]578 rc = pVCpu->vmm.s.iLastGZRc;
[1]579 uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
[13816]580 if (RT_FAILURE(rc))
[1]581 {
[13818]582 Log(("VMM: GC returned fatal %Rra in iteration %d\n", rc, i));
[18927]583 VMMR3FatalDump(pVM, pVCpu, rc);
[1]584 return rc;
585 }
586 if (TickThisElapsed < TickMin)
587 TickMin = TickThisElapsed;
588 }
589 TickEnd = ASMReadTSC();
590 tsEnd = RTTimeNanoTS();
591
592 Elapsed = tsEnd - tsBegin;
593 PerIteration = Elapsed / (uint64_t)i;
594 cTicksElapsed = TickEnd - TickStart;
595 cTicksPerIteration = cTicksElapsed / (uint64_t)i;
596
597 RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
598 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
599 Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
600 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));
601
602 rc = VINF_SUCCESS;
[49164]603
[63429]604# if 0 /* drop this for now as it causes trouble on AMDs (Opteron 2384 and possibly others). */
[49164]605 /*
606 * A quick MSR report.
607 */
[49367]608 vmmR3DoMsrQuickReport(pVM, NULL, true);
[63429]609# endif
[1]610 }
611 else
[56286]612 AssertMsgFailed(("Failed to resolve VMMRC.rc::VMMRCEntry(), rc=%Rrc\n", rc));
[63429]613#else /* !VBOX_WITH_RAW_MODE */
614 RT_NOREF(pVM);
615#endif /* !VBOX_WITH_RAW_MODE */
[1]616 return rc;
617}
618
[1248]619#define SYNC_SEL(pHyperCtx, reg) \
[41906]620 if (pHyperCtx->reg.Sel) \
[1248]621 { \
[19334]622 DBGFSELINFO selInfo; \
[41906]623 int rc2 = SELMR3GetShadowSelectorInfo(pVM, pHyperCtx->reg.Sel, &selInfo); \
[25239]624 AssertRC(rc2); \
[1248]625 \
[41906]626 pHyperCtx->reg.u64Base = selInfo.GCPtrBase; \
627 pHyperCtx->reg.u32Limit = selInfo.cbLimit; \
628 pHyperCtx->reg.Attr.n.u1Present = selInfo.u.Raw.Gen.u1Present; \
629 pHyperCtx->reg.Attr.n.u1DefBig = selInfo.u.Raw.Gen.u1DefBig; \
630 pHyperCtx->reg.Attr.n.u1Granularity = selInfo.u.Raw.Gen.u1Granularity; \
631 pHyperCtx->reg.Attr.n.u4Type = selInfo.u.Raw.Gen.u4Type; \
632 pHyperCtx->reg.Attr.n.u2Dpl = selInfo.u.Raw.Gen.u2Dpl; \
633 pHyperCtx->reg.Attr.n.u1DescType = selInfo.u.Raw.Gen.u1DescType; \
634 pHyperCtx->reg.Attr.n.u1Long = selInfo.u.Raw.Gen.u1Long; \
[1248]635 }
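
/*
 * Editor's note: SYNC_SEL above is a multi-statement macro written as a bare
 * if-block.  As a hedged aside (not a change to the original), the usual way
 * to make such macros safe in every context, e.g. as the body of an un-braced
 * if/else, is the do { ... } while (0) idiom sketched below with a made-up
 * macro; it is wrapped in #if 0 so it cannot affect the build.
 */
#if 0 /* illustrative only */
#include <stdio.h>

#define EXAMPLE_TRACE_NONZERO(a_uValue) \
    do \
    { \
        if (a_uValue) \
            printf("non-zero: %u\n", (unsigned)(a_uValue)); \
    } while (0)

int main(void)
{
    unsigned u = 42;
    if (u > 10)
        EXAMPLE_TRACE_NONZERO(u);   /* safe even without braces around the if-body */
    else
        printf("small\n");
    return 0;
}
#endif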
636
[1210]637/** Profiles the world switcher using the hardware-accelerated (HM) execution path. */
[43391]638VMMR3DECL(int) VMMDoHmTest(PVM pVM)
[1210]639{
640 uint32_t i;
[1257]641 int rc;
642 PCPUMCTX pHyperCtx, pGuestCtx;
[1269]643 RTGCPHYS CR3Phys = 0x0; /* fake address */
[18927]644 PVMCPU pVCpu = &pVM->aCpus[0];
[1210]645
[45618]646 if (!HMIsEnabled(pVM))
[1210]647 {
[1239]648 RTPrintf("VMM: Hardware accelerated test not available!\n");
[1210]649 return VERR_ACCESS_DENIED;
650 }
651
[45533]652#ifdef VBOX_WITH_RAW_MODE
[1210]653 /*
654 * These forced actions are not necessary for the test and trigger breakpoints too.
655 */
[19141]656 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
657 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
[45533]658#endif
[1210]659
660 /* Enable mapping of the hypervisor into the shadow page table. */
[16408]661 uint32_t cb;
662 rc = PGMR3MappingsSize(pVM, &cb);
663 AssertRCReturn(rc, rc);
[1210]664
[16408]665 /* Pretend the mappings are now fixed, to force a refresh of the reserved PDEs. */
666 rc = PGMR3MappingsFix(pVM, MM_HYPER_AREA_ADDRESS, cb);
667 AssertRCReturn(rc, rc);
668
[41931]669 pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
[1257]670
671 pHyperCtx->cr0 = X86_CR0_PE | X86_CR0_WP | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
[54862]672 pHyperCtx->cr4 = X86_CR4_PGE | X86_CR4_OSFXSR | X86_CR4_OSXMMEEXCPT;
[18992]673 PGMChangeMode(pVCpu, pHyperCtx->cr0, pHyperCtx->cr4, pHyperCtx->msrEFER);
674 PGMSyncCR3(pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);
[1257]675
[19141]676 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
[19660]677 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
678 VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
[1269]679 VM_FF_CLEAR(pVM, VM_FF_REQUEST);
680
[1210]681 /*
[56286]682 * Setup stack for calling VMMRCEntry().
[1210]683 */
[13813]684 RTRCPTR RCPtrEP;
[56286]685 rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCEntry", &RCPtrEP);
[13816]686 if (RT_SUCCESS(rc))
[1210]687 {
[56286]688 RTPrintf("VMM: VMMRCEntry=%RRv\n", RCPtrEP);
[1248]689
[41931]690 pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
[1248]691
692 /* Fill in hidden selector registers for the hypervisor state. */
693 SYNC_SEL(pHyperCtx, cs);
694 SYNC_SEL(pHyperCtx, ds);
695 SYNC_SEL(pHyperCtx, es);
696 SYNC_SEL(pHyperCtx, fs);
697 SYNC_SEL(pHyperCtx, gs);
698 SYNC_SEL(pHyperCtx, ss);
[1249]699 SYNC_SEL(pHyperCtx, tr);
[1248]700
[1210]701 /*
702 * Profile switching.
703 */
704 RTPrintf("VMM: profiling switcher...\n");
705 Log(("VMM: profiling switcher...\n"));
[62647]706 uint64_t TickMin = UINT64_MAX;
[1210]707 uint64_t tsBegin = RTTimeNanoTS();
708 uint64_t TickStart = ASMReadTSC();
709 for (i = 0; i < 1000000; i++)
710 {
[41985]711 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
[18927]712 CPUMPushHyper(pVCpu, 0);
[56286]713 CPUMPushHyper(pVCpu, VMMRC_DO_TESTCASE_HM_NOP);
[18927]714 CPUMPushHyper(pVCpu, pVM->pVMRC);
715 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
716 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
[1210]717
[41931]718 pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
[18927]719 pGuestCtx = CPUMQueryGuestCtxPtr(pVCpu);
[1210]720
721 /* Copy the hypervisor context to make sure we have a valid guest context. */
722 *pGuestCtx = *pHyperCtx;
[1269]723 pGuestCtx->cr3 = CR3Phys;
[1210]724
[19141]725 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
[19660]726 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
727 VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
[1248]728
[1210]729 uint64_t TickThisStart = ASMReadTSC();
[43394]730 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, 0);
[1210]731 uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
[13816]732 if (RT_FAILURE(rc))
[1210]733 {
[13818]734 Log(("VMM: R0 returned fatal %Rrc in iteration %d\n", rc, i));
[18927]735 VMMR3FatalDump(pVM, pVCpu, rc);
[1210]736 return rc;
737 }
738 if (TickThisElapsed < TickMin)
739 TickMin = TickThisElapsed;
740 }
741 uint64_t TickEnd = ASMReadTSC();
742 uint64_t tsEnd = RTTimeNanoTS();
743
744 uint64_t Elapsed = tsEnd - tsBegin;
745 uint64_t PerIteration = Elapsed / (uint64_t)i;
746 uint64_t cTicksElapsed = TickEnd - TickStart;
747 uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;
748
749 RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
750 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
751 Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
752 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));
753
754 rc = VINF_SUCCESS;
755 }
756 else
[56286]757 AssertMsgFailed(("Failed to resolve VMMRC.rc::VMMRCEntry(), rc=%Rrc\n", rc));
[1210]758
759 return rc;
760}
[1313]761
[49141]762
763#ifdef VBOX_WITH_RAW_MODE
764
765/**
766 * Used by VMMDoBruteForceMsrs to dump the CPUID info of the host CPU as a
767 * prefix to the MSR report.
768 */
769static DECLCALLBACK(void) vmmDoPrintfVToStream(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list va)
770{
771 PRTSTREAM pOutStrm = ((PRTSTREAM *)pHlp)[-1];
772 RTStrmPrintfV(pOutStrm, pszFormat, va);
773}
774
775/**
776 * Used by VMMDoBruteForceMsrs to dump the CPUID info of the host CPU as a
777 * prefix to the MSR report.
778 */
779static DECLCALLBACK(void) vmmDoPrintfToStream(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
780{
781 va_list va;
782 va_start(va, pszFormat);
783 vmmDoPrintfVToStream(pHlp, pszFormat, va);
784 va_end(va);
785}
786
787#endif
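
/*
 * Editor's note: the ((PRTSTREAM *)pHlp)[-1] expression above recovers the
 * output stream by relying on MyHlp in VMMDoBruteForceMsrs storing the stream
 * pointer directly in front of the DBGF helper structure.  The standalone
 * sketch below (hypothetical types, no VBox APIs, wrapped in #if 0) shows the
 * same "look one pointer slot before the callback struct" pattern.
 */
#if 0 /* illustrative only */
#include <stdio.h>

typedef struct ExampleHlp
{
    void (*pfnPrintf)(struct ExampleHlp *pHlp, const char *pszText);
} ExampleHlp;

static void examplePrintfToStream(ExampleHlp *pHlp, const char *pszText)
{
    /* The FILE pointer is stored immediately before the helper struct. */
    FILE *pOutStrm = ((FILE **)pHlp)[-1];
    fputs(pszText, pOutStrm);
}

int main(void)
{
    struct
    {
        FILE      *pOutStrm;    /* must directly precede Hlp, as in MyHlp above */
        ExampleHlp Hlp;
    } MyHlp = { stdout, { examplePrintfToStream } };
    MyHlp.Hlp.pfnPrintf(&MyHlp.Hlp, "hello from the stream stored in front of the helper\n");
    return 0;
}
#endif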
788
789
790/**
791 * Uses raw-mode to query all possible MSRs on the real hardware.
792 *
793 * This generates a msr-report.txt file (appending, no overwriting) as well as
794 * writing the values and progress to stdout.
795 *
796 * @returns VBox status code.
[58122]797 * @param pVM The cross context VM structure.
[49141]798 */
[49367]799VMMR3DECL(int) VMMDoBruteForceMsrs(PVM pVM)
[49141]800{
801#ifdef VBOX_WITH_RAW_MODE
[49164]802 PRTSTREAM pOutStrm;
803 int rc = RTStrmOpen("msr-report.txt", "a", &pOutStrm);
[49141]804 if (RT_SUCCESS(rc))
805 {
[49164]806 /* Header */
807 struct
[49141]808 {
[49164]809 PRTSTREAM pOutStrm;
810 DBGFINFOHLP Hlp;
811 } MyHlp = { pOutStrm, { vmmDoPrintfToStream, vmmDoPrintfVToStream } };
812 DBGFR3Info(pVM->pUVM, "cpuid", "verbose", &MyHlp.Hlp);
813 RTStrmPrintf(pOutStrm, "\n");
[49141]814
[49164]815 uint32_t cMsrsFound = 0;
816 vmmR3ReportMsrRange(pVM, 0, _4G, pOutStrm, &cMsrsFound);
[49141]817
[49164]818 RTStrmPrintf(pOutStrm, "Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
819 RTPrintf("Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
[49141]820
[49164]821 RTStrmClose(pOutStrm);
[49141]822 }
823 return rc;
824#else
[63429]825 RT_NOREF(pVM);
[49141]826 return VERR_NOT_SUPPORTED;
827#endif
828}
829
[49367]830
831/**
832 * Uses raw-mode to query all known MSRS on the real hardware.
833 *
834 * This generates a known-msr-report.txt file (appending, no overwriting) as
835 * well as writing the values and progress to stdout.
836 *
837 * @returns VBox status code.
[58122]838 * @param pVM The cross context VM structure.
[49367]839 */
840VMMR3DECL(int) VMMDoKnownMsrs(PVM pVM)
841{
842#ifdef VBOX_WITH_RAW_MODE
843 PRTSTREAM pOutStrm;
844 int rc = RTStrmOpen("known-msr-report.txt", "a", &pOutStrm);
845 if (RT_SUCCESS(rc))
846 {
847 vmmR3DoMsrQuickReport(pVM, pOutStrm, false);
848 RTStrmClose(pOutStrm);
849 }
850 return rc;
851#else
[63429]852 RT_NOREF(pVM);
[49367]853 return VERR_NOT_SUPPORTED;
854#endif
855}
856
857
858/**
859 * MSR experimentation.
860 *
861 * @returns VBox status code.
[58122]862 * @param pVM The cross context VM structure.
[49367]863 */
864VMMR3DECL(int) VMMDoMsrExperiments(PVM pVM)
865{
866#ifdef VBOX_WITH_RAW_MODE
867 /*
868 * Preps.
869 */
870 RTRCPTR RCPtrEP;
[56051]871 int rc = PDMR3LdrGetSymbolRC(pVM, VMMRC_MAIN_MODULE_NAME, "VMMRCTestTestWriteMsr", &RCPtrEP);
[49367]872 AssertMsgRCReturn(rc, ("Failed to resolve VMMRC.rc::VMMRCTestTestWriteMsr(), rc=%Rrc\n", rc), rc);
873
874 uint64_t *pauValues;
875 rc = MMHyperAlloc(pVM, 2 * sizeof(uint64_t), 0, MM_TAG_VMM, (void **)&pauValues);
876 AssertMsgRCReturn(rc, ("Error allocating %#x bytes off the hyper heap: %Rrc\n", 2 * sizeof(uint64_t), rc), rc);
877 RTRCPTR RCPtrValues = MMHyperR3ToRC(pVM, pauValues);
878
879 /*
880 * Do the experiments.
881 */
[49893]882 uint32_t uMsr = 0x00000277;
883 uint64_t uValue = UINT64_C(0x0007010600070106);
[63429]884# if 0
[49893]885 uValue &= ~(RT_BIT_64(17) | RT_BIT_64(16) | RT_BIT_64(15) | RT_BIT_64(14) | RT_BIT_64(13));
886 uValue |= RT_BIT_64(13);
[49367]887 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
888 RCPtrValues, RCPtrValues + sizeof(uint64_t));
889 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
890 uMsr, pauValues[0], uValue, pauValues[1], rc);
[63429]891# elif 1
[49893]892 const uint64_t uOrgValue = uValue;
893 uint32_t cChanges = 0;
894 for (int iBit = 63; iBit >= 58; iBit--)
895 {
896 uValue = uOrgValue & ~RT_BIT_64(iBit);
897 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
898 RCPtrValues, RCPtrValues + sizeof(uint64_t));
899 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nclear bit=%u -> %s\n",
900 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
901 (pauValues[0] ^ pauValues[1]) & RT_BIT_64(iBit) ? "changed" : "unchanged");
902 cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
903
904 uValue = uOrgValue | RT_BIT_64(iBit);
905 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
906 RCPtrValues, RCPtrValues + sizeof(uint64_t));
907 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nset bit=%u -> %s\n",
908 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
909 (pauValues[0] ^ pauValues[1]) & RT_BIT_64(iBit) ? "changed" : "unchanged");
910 cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
911 }
912 RTPrintf("%u change(s)\n", cChanges);
[63429]913# else
[49893]914 uint64_t fWriteable = 0;
[49367]915 for (uint32_t i = 0; i <= 63; i++)
916 {
917 uValue = RT_BIT_64(i);
[49893]918# if 0
919 if (uValue & (0x7))
920 continue;
921# endif
[49367]922 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
923 RCPtrValues, RCPtrValues + sizeof(uint64_t));
924 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
925 uMsr, pauValues[0], uValue, pauValues[1], rc);
[49893]926 if (RT_SUCCESS(rc))
927 fWriteable |= RT_BIT_64(i);
[49367]928 }
929
930 uValue = 0;
931 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
932 RCPtrValues, RCPtrValues + sizeof(uint64_t));
933 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
934 uMsr, pauValues[0], uValue, pauValues[1], rc);
935
936 uValue = UINT64_MAX;
937 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
938 RCPtrValues, RCPtrValues + sizeof(uint64_t));
939 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
940 uMsr, pauValues[0], uValue, pauValues[1], rc);
941
[49893]942 uValue = fWriteable;
943 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
944 RCPtrValues, RCPtrValues + sizeof(uint64_t));
945 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc [fWriteable]\n",
946 uMsr, pauValues[0], uValue, pauValues[1], rc);
947
[63429]948# endif
[49893]949
[49367]950 /*
951 * Cleanups.
952 */
953 MMHyperFree(pVM, pauValues);
954 return rc;
955#else
[63429]956 RT_NOREF(pVM);
[49367]957 return VERR_NOT_SUPPORTED;
958#endif
959}
960
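/*
 * Editor's note: a standalone sketch (made-up names, no hardware or VBox
 * access, wrapped in #if 0) of the probing idea used in VMMDoMsrExperiments
 * above: write each single-bit value, read it back, and record which bits
 * stick.  A plain variable stands in for the MSR so the sketch stays runnable.
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for wrmsr/rdmsr: bits 0..15 are writable, the rest are ignored. */
static uint64_t g_uFakeMsr;
static void     exampleWriteMsr(uint64_t uValue) { g_uFakeMsr = uValue & UINT64_C(0xffff); }
static uint64_t exampleReadMsr(void)             { return g_uFakeMsr; }

int main(void)
{
    uint64_t fWriteable = 0;
    for (unsigned iBit = 0; iBit < 64; iBit++)
    {
        uint64_t const uValue = UINT64_C(1) << iBit;
        exampleWriteMsr(uValue);
        if (exampleReadMsr() & uValue)      /* did the bit stick? */
            fWriteable |= uValue;
    }
    printf("writable mask: %#018llx\n", (unsigned long long)fWriteable);
    return 0;
}
#endif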