VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/GIMAllHv.cpp @ 96860

Last change on this file since 96860 was 96407, checked in by vboxsync, 21 months ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 57.0 KB
1/* $Id: GIMAllHv.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * GIM - Guest Interface Manager, Microsoft Hyper-V, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2014-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_GIM
33#include <VBox/vmm/gim.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/hm.h>
36#include <VBox/vmm/tm.h>
37#include <VBox/vmm/dbgf.h>
38#include <VBox/vmm/pdmdev.h>
39#include <VBox/vmm/pdmapi.h>
40#include <VBox/vmm/pgm.h>
41#include <VBox/vmm/apic.h>
42#include <VBox/vmm/em.h>
43#include "GIMHvInternal.h"
44#include "GIMInternal.h"
45#include <VBox/vmm/vmcc.h>
46
47#include <VBox/err.h>
48
49#ifdef IN_RING3
50# include <iprt/mem.h>
51#endif
52
53
54#ifdef IN_RING3
55/**
56 * Read and validate slow hypercall parameters.
57 *
58 * @returns VBox status code.
59 * @param pVM The cross context VM structure.
60 * @param pCtx Pointer to the guest-CPU context.
61 * @param fIs64BitMode Whether the guest is currently in 64-bit mode or not.
62 * @param enmParam The hypercall parameter type.
63 * @param prcHv Where to store the Hyper-V status code. Only valid
64 * to the caller when this function returns
65 * VINF_SUCCESS.
66 */
67static int gimHvReadSlowHypercallParam(PVM pVM, PCPUMCTX pCtx, bool fIs64BitMode, GIMHVHYPERCALLPARAM enmParam, int *prcHv)
68{
69 int rc = VINF_SUCCESS;
70 PGIMHV pHv = &pVM->gim.s.u.Hv;
71 RTGCPHYS GCPhysParam;
72 void *pvDst;
73 if (enmParam == GIMHVHYPERCALLPARAM_IN)
74 {
75 GCPhysParam = fIs64BitMode ? pCtx->rdx : (pCtx->rbx << 32) | pCtx->ecx;
76 pvDst = pHv->pbHypercallIn;
77 pHv->GCPhysHypercallIn = GCPhysParam;
78 }
79 else
80 {
81 GCPhysParam = fIs64BitMode ? pCtx->r8 : (pCtx->rdi << 32) | pCtx->esi;
82 pvDst = pHv->pbHypercallOut;
83 pHv->GCPhysHypercallOut = GCPhysParam;
84 Assert(enmParam == GIMHVHYPERCALLPARAM_OUT);
85 }
86
87 const char *pcszParam = enmParam == GIMHVHYPERCALLPARAM_IN ? "input" : "output"; NOREF(pcszParam);
88 if (RT_ALIGN_64(GCPhysParam, 8) == GCPhysParam)
89 {
90 if (PGMPhysIsGCPhysNormal(pVM, GCPhysParam))
91 {
92 rc = PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhysParam, GIM_HV_PAGE_SIZE);
93 if (RT_SUCCESS(rc))
94 {
95 *prcHv = GIM_HV_STATUS_SUCCESS;
96 return VINF_SUCCESS;
97 }
98 LogRel(("GIM: HyperV: Failed reading %s param at %#RGp. rc=%Rrc\n", pcszParam, GCPhysParam, rc));
99 rc = VERR_GIM_HYPERCALL_MEMORY_READ_FAILED;
100 }
101 else
102 {
103 Log(("GIM: HyperV: Invalid %s param address %#RGp\n", pcszParam, GCPhysParam));
104 *prcHv = GIM_HV_STATUS_INVALID_PARAMETER;
105 }
106 }
107 else
108 {
109 Log(("GIM: HyperV: Misaligned %s param address %#RGp\n", pcszParam, GCPhysParam));
110 *prcHv = GIM_HV_STATUS_INVALID_ALIGNMENT;
111 }
112 return rc;
113}
114
115
116/**
117 * Helper for reading and validating slow hypercall input and output parameters.
118 *
119 * @returns VBox status code.
120 * @param pVM The cross context VM structure.
121 * @param pCtx Pointer to the guest-CPU context.
122 * @param fIs64BitMode Whether the guest is currently in 64-bit mode or not.
123 * @param prcHv Where to store the Hyper-V status code. Only valid
124 * to the caller when this function returns
125 * VINF_SUCCESS.
126 */
127static int gimHvReadSlowHypercallParamsInOut(PVM pVM, PCPUMCTX pCtx, bool fIs64BitMode, int *prcHv)
128{
129 int rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_IN, prcHv);
130 if ( RT_SUCCESS(rc)
131 && *prcHv == GIM_HV_STATUS_SUCCESS)
132 rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_OUT, prcHv);
133 return rc;
134}
135#endif
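/*
 * A minimal guest-side sketch of how a 64-bit guest would issue a slow hypercall
 * matching the register convention read above: RCX = hypercall input value,
 * RDX = input parameter GPA, R8 = output parameter GPA, result returned in RAX.
 * Assumes a 64-bit Intel guest using VMCALL directly and GCC-style inline
 * assembly; the helper name hvSlowHypercall64 is illustrative only.
 *
 *   static uint64_t hvSlowHypercall64(uint64_t uCallInfo, uint64_t GCPhysIn, uint64_t GCPhysOut)
 *   {
 *       register uint64_t uR8 asm("r8") = GCPhysOut;
 *       uint64_t uResult;
 *       __asm__ __volatile__("vmcall"
 *                            : "=a" (uResult)
 *                            : "c" (uCallInfo), "d" (GCPhysIn), "r" (uR8)
 *                            : "memory");
 *       return uResult;
 *   }
 */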
136
137
138/**
139 * Handles all Hyper-V hypercalls.
140 *
141 * @returns Strict VBox status code.
142 * @retval VINF_SUCCESS if the hypercall succeeded (even if its operation
143 * failed).
144 * @retval VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
145 * @retval VERR_GIM_HYPERCALLS_NOT_ENABLED hypercalls are disabled by the
146 * guest.
147 * @retval VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
148 * @retval VERR_GIM_HYPERCALL_MEMORY_READ_FAILED hypercall failed while reading
149 * memory.
150 * @retval VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED hypercall failed while
151 * writing memory.
152 *
153 * @param pVCpu The cross context virtual CPU structure.
154 * @param pCtx Pointer to the guest-CPU context.
155 *
156 * @thread EMT(pVCpu).
157 */
158VMM_INT_DECL(VBOXSTRICTRC) gimHvHypercall(PVMCPUCC pVCpu, PCPUMCTX pCtx)
159{
160 VMCPU_ASSERT_EMT(pVCpu);
161
162#ifndef IN_RING3
163 RT_NOREF_PV(pVCpu);
164 RT_NOREF_PV(pCtx);
165 return VINF_GIM_R3_HYPERCALL;
166#else
167 PVM pVM = pVCpu->CTX_SUFF(pVM);
168 STAM_REL_COUNTER_INC(&pVM->gim.s.StatHypercalls);
169
170 /*
171 * Verify that hypercalls are enabled by the guest.
172 */
173 if (!gimHvAreHypercallsEnabled(pVM))
174 return VERR_GIM_HYPERCALLS_NOT_ENABLED;
175
176 /*
177 * Verify guest is in ring-0 protected mode.
178 */
179 uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
180 if ( uCpl
181 || CPUMIsGuestInRealModeEx(pCtx))
182 {
183 return VERR_GIM_HYPERCALL_ACCESS_DENIED;
184 }
185
186 /*
187 * Get the hypercall operation code and modes.
188 * Fast hypercalls have at most two input parameters and no output parameters.
189 */
190 const bool fIs64BitMode = CPUMIsGuestIn64BitCodeEx(pCtx);
191 const uint64_t uHyperIn = fIs64BitMode ? pCtx->rcx : (pCtx->rdx << 32) | pCtx->eax;
192 const uint16_t uHyperOp = GIM_HV_HYPERCALL_IN_CALL_CODE(uHyperIn);
193 const bool fHyperFast = GIM_HV_HYPERCALL_IN_IS_FAST(uHyperIn);
194 const uint16_t cHyperReps = GIM_HV_HYPERCALL_IN_REP_COUNT(uHyperIn);
195 const uint16_t idxHyperRepStart = GIM_HV_HYPERCALL_IN_REP_START_IDX(uHyperIn);
196 uint64_t cHyperRepsDone = 0;
197
198 /* Currently no repeating hypercalls are supported. */
199 RT_NOREF2(cHyperReps, idxHyperRepStart);
200
201 int rc = VINF_SUCCESS;
202 int rcHv = GIM_HV_STATUS_OPERATION_DENIED;
203 PGIMHV pHv = &pVM->gim.s.u.Hv;
204
205 /*
206 * Validate common hypercall input parameters.
207 */
208 if ( !GIM_HV_HYPERCALL_IN_RSVD_1(uHyperIn)
209 && !GIM_HV_HYPERCALL_IN_RSVD_2(uHyperIn)
210 && !GIM_HV_HYPERCALL_IN_RSVD_3(uHyperIn))
211 {
212 /*
213 * Perform the hypercall.
214 */
215 switch (uHyperOp)
216 {
217 case GIM_HV_HYPERCALL_OP_RETREIVE_DEBUG_DATA: /* Non-rep, memory IO. */
218 {
219 if (pHv->uPartFlags & GIM_HV_PART_FLAGS_DEBUGGING)
220 {
221 rc = gimHvReadSlowHypercallParamsInOut(pVM, pCtx, fIs64BitMode, &rcHv);
222 if ( RT_SUCCESS(rc)
223 && rcHv == GIM_HV_STATUS_SUCCESS)
224 {
225 LogRelMax(1, ("GIM: HyperV: Initiated debug data reception via hypercall\n"));
226 rc = gimR3HvHypercallRetrieveDebugData(pVM, &rcHv);
227 if (RT_FAILURE(rc))
228 LogRelMax(10, ("GIM: HyperV: gimR3HvHypercallRetrieveDebugData failed. rc=%Rrc\n", rc));
229 }
230 }
231 else
232 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
233 break;
234 }
235
236 case GIM_HV_HYPERCALL_OP_POST_DEBUG_DATA: /* Non-rep, memory IO. */
237 {
238 if (pHv->uPartFlags & GIM_HV_PART_FLAGS_DEBUGGING)
239 {
240 rc = gimHvReadSlowHypercallParamsInOut(pVM, pCtx, fIs64BitMode, &rcHv);
241 if ( RT_SUCCESS(rc)
242 && rcHv == GIM_HV_STATUS_SUCCESS)
243 {
244 LogRelMax(1, ("GIM: HyperV: Initiated debug data transmission via hypercall\n"));
245 rc = gimR3HvHypercallPostDebugData(pVM, &rcHv);
246 if (RT_FAILURE(rc))
247 LogRelMax(10, ("GIM: HyperV: gimR3HvHypercallPostDebugData failed. rc=%Rrc\n", rc));
248 }
249 }
250 else
251 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
252 break;
253 }
254
255 case GIM_HV_HYPERCALL_OP_RESET_DEBUG_SESSION: /* Non-rep, fast (register IO). */
256 {
257 if (pHv->uPartFlags & GIM_HV_PART_FLAGS_DEBUGGING)
258 {
259 uint32_t fFlags = 0;
260 if (!fHyperFast)
261 {
262 rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_IN, &rcHv);
263 if ( RT_SUCCESS(rc)
264 && rcHv == GIM_HV_STATUS_SUCCESS)
265 {
266 PGIMHVDEBUGRESETIN pIn = (PGIMHVDEBUGRESETIN)pHv->pbHypercallIn;
267 fFlags = pIn->fFlags;
268 }
269 }
270 else
271 {
272 rcHv = GIM_HV_STATUS_SUCCESS;
273 fFlags = fIs64BitMode ? pCtx->rdx : pCtx->ebx;
274 }
275
276 /*
277 * Nothing to flush on the sending side as we don't maintain our own buffers.
278 */
279 /** @todo We should probably ask the debug receive thread to flush its buffer. */
280 if (rcHv == GIM_HV_STATUS_SUCCESS)
281 {
282 if (fFlags)
283 LogRel(("GIM: HyperV: Resetting debug session via hypercall\n"));
284 else
285 rcHv = GIM_HV_STATUS_INVALID_PARAMETER;
286 }
287 }
288 else
289 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
290 break;
291 }
292
293 case GIM_HV_HYPERCALL_OP_POST_MESSAGE: /* Non-rep, memory IO. */
294 {
295 if (pHv->fIsInterfaceVs)
296 {
297 rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_IN, &rcHv);
298 if ( RT_SUCCESS(rc)
299 && rcHv == GIM_HV_STATUS_SUCCESS)
300 {
301 PGIMHVPOSTMESSAGEIN pMsgIn = (PGIMHVPOSTMESSAGEIN)pHv->pbHypercallIn;
302 PCGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
303 if ( pMsgIn->uConnectionId == GIM_HV_VMBUS_MSG_CONNECTION_ID
304 && pMsgIn->enmMessageType == GIMHVMSGTYPE_VMBUS
305 && !MSR_GIM_HV_SINT_IS_MASKED(pHvCpu->auSintMsrs[GIM_HV_VMBUS_MSG_SINT])
306 && MSR_GIM_HV_SIMP_IS_ENABLED(pHvCpu->uSimpMsr))
307 {
308 RTGCPHYS GCPhysSimp = MSR_GIM_HV_SIMP_GPA(pHvCpu->uSimpMsr);
309 if (PGMPhysIsGCPhysNormal(pVM, GCPhysSimp))
310 {
311 /*
312 * The VMBus client (guest) expects to see 0xf at offsets 4 and 16 and 1 at offset 0.
313 */
314 GIMHVMSG HvMsg;
315 RT_ZERO(HvMsg);
316 HvMsg.MsgHdr.enmMessageType = GIMHVMSGTYPE_VMBUS;
317 HvMsg.MsgHdr.cbPayload = 0xf;
318 HvMsg.aPayload[0] = 0xf;
319 uint16_t const offMsg = GIM_HV_VMBUS_MSG_SINT * sizeof(GIMHVMSG);
320 int rc2 = PGMPhysSimpleWriteGCPhys(pVM, GCPhysSimp + offMsg, &HvMsg, sizeof(HvMsg));
321 if (RT_SUCCESS(rc2))
322 LogRel(("GIM: HyperV: SIMP hypercall faking message at %#RGp:%u\n", GCPhysSimp, offMsg));
323 else
324 {
325 LogRel(("GIM: HyperV: Failed to write SIMP message at %#RGp:%u, rc=%Rrc\n", GCPhysSimp,
326 offMsg, rc2));
327 }
328 }
329 }
330
331 /*
332 * Make the call fail after updating the SIMP, so the guest can go back to using the
333 * Hyper-V debug MSR interface. If we return any status code below GIM_HV_STATUS_NOT_ACKNOWLEDGED,
334 * the guest tries to proceed with initializing VMBus, which is entirely unnecessary for what
335 * we're trying to accomplish, i.e. convincing the guest to use Hyper-V debugging. Also, we
336 * don't implement other VMBus/SynIC functionality, so the guest would #GP and die.
337 */
338 rcHv = GIM_HV_STATUS_NOT_ACKNOWLEDGED;
339 }
340 else
341 rcHv = GIM_HV_STATUS_INVALID_PARAMETER;
342 }
343 else
344 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
345 break;
346 }
347
348 case GIM_HV_EXT_HYPERCALL_OP_QUERY_CAP: /* Non-rep, extended hypercall. */
349 {
350 if (pHv->uPartFlags & GIM_HV_PART_FLAGS_EXTENDED_HYPERCALLS)
351 {
352 rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_OUT, &rcHv);
353 if ( RT_SUCCESS(rc)
354 && rcHv == GIM_HV_STATUS_SUCCESS)
355 {
356 rc = gimR3HvHypercallExtQueryCap(pVM, &rcHv);
357 }
358 }
359 else
360 {
361 LogRel(("GIM: HyperV: Denied HvExtCallQueryCapabilities when the feature is not exposed\n"));
362 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
363 }
364 break;
365 }
366
367 case GIM_HV_EXT_HYPERCALL_OP_GET_BOOT_ZEROED_MEM: /* Non-rep, extended hypercall. */
368 {
369 if (pHv->uPartFlags & GIM_HV_PART_FLAGS_EXTENDED_HYPERCALLS)
370 {
371 rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_OUT, &rcHv);
372 if ( RT_SUCCESS(rc)
373 && rcHv == GIM_HV_STATUS_SUCCESS)
374 {
375 rc = gimR3HvHypercallExtGetBootZeroedMem(pVM, &rcHv);
376 }
377 }
378 else
379 {
380 LogRel(("GIM: HyperV: Denied HvExtCallGetBootZeroedMemory when the feature is not exposed\n"));
381 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
382 }
383 break;
384 }
385
386 default:
387 {
388 LogRel(("GIM: HyperV: Unknown/invalid hypercall opcode %#x (%u)\n", uHyperOp, uHyperOp));
389 rcHv = GIM_HV_STATUS_INVALID_HYPERCALL_CODE;
390 break;
391 }
392 }
393 }
394 else
395 rcHv = GIM_HV_STATUS_INVALID_HYPERCALL_INPUT;
396
397 /*
398 * Update the guest with results of the hypercall.
399 */
400 if (RT_SUCCESS(rc))
401 {
402 if (fIs64BitMode)
403 pCtx->rax = (cHyperRepsDone << 32) | rcHv;
404 else
405 {
406 pCtx->edx = cHyperRepsDone;
407 pCtx->eax = rcHv;
408 }
409 }
410
411 return rc;
412#endif
413}
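/*
 * A minimal sketch of how a guest would decode the result value written back above:
 * the low 16 bits carry the Hyper-V status code (rcHv) and the upper 32 bits the
 * number of completed reps (cHyperRepsDone). The helper name is illustrative only.
 *
 *   static void hvDecodeHypercallResult(uint64_t uResult, uint16_t *puStatus, uint32_t *pcRepsDone)
 *   {
 *       *puStatus   = (uint16_t)uResult;          // e.g. GIM_HV_STATUS_SUCCESS
 *       *pcRepsDone = (uint32_t)(uResult >> 32);
 *   }
 */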
414
415
416/**
417 * Returns a pointer to the MMIO2 regions supported by Hyper-V.
418 *
419 * @returns Pointer to an array of MMIO2 regions.
420 * @param pVM The cross context VM structure.
421 * @param pcRegions Where to store the number of regions in the array.
422 */
423VMM_INT_DECL(PGIMMMIO2REGION) gimHvGetMmio2Regions(PVM pVM, uint32_t *pcRegions)
424{
425 Assert(GIMIsEnabled(pVM));
426 PGIMHV pHv = &pVM->gim.s.u.Hv;
427
428 AssertCompile(RT_ELEMENTS(pHv->aMmio2Regions) <= 8);
429 *pcRegions = RT_ELEMENTS(pHv->aMmio2Regions);
430 return pHv->aMmio2Regions;
431}
432
433
434/**
435 * Returns whether the guest has configured and enabled the use of Hyper-V's
436 * hypercall interface.
437 *
438 * @returns true if hypercalls are enabled, false otherwise.
439 * @param pVM The cross context VM structure.
440 */
441VMM_INT_DECL(bool) gimHvAreHypercallsEnabled(PCVM pVM)
442{
443 return RT_BOOL(pVM->gim.s.u.Hv.u64GuestOsIdMsr != 0);
444}
445
446
447/**
448 * Returns whether the guest has configured and enabled the use of Hyper-V's
449 * paravirtualized TSC.
450 *
451 * @returns true if paravirt. TSC is enabled, false otherwise.
452 * @param pVM The cross context VM structure.
453 */
454VMM_INT_DECL(bool) gimHvIsParavirtTscEnabled(PVM pVM)
455{
456 return MSR_GIM_HV_REF_TSC_IS_ENABLED(pVM->gim.s.u.Hv.u64TscPageMsr);
457}
458
459
460#ifdef IN_RING3
461/**
462 * Gets the descriptive OS ID variant as identified via the
463 * MSR_GIM_HV_GUEST_OS_ID MSR.
464 *
465 * @returns The name.
466 * @param uGuestOsIdMsr The MSR_GIM_HV_GUEST_OS_ID MSR.
467 */
468static const char *gimHvGetGuestOsIdVariantName(uint64_t uGuestOsIdMsr)
469{
470 /* Refer to the Hyper-V spec, section 3.6 "Reporting the Guest OS Identity". */
471 uint32_t uVendor = MSR_GIM_HV_GUEST_OS_ID_VENDOR(uGuestOsIdMsr);
472 if (uVendor == 1 /* Microsoft */)
473 {
474 uint32_t uOsVariant = MSR_GIM_HV_GUEST_OS_ID_OS_VARIANT(uGuestOsIdMsr);
475 switch (uOsVariant)
476 {
477 case 0: return "Undefined";
478 case 1: return "MS-DOS";
479 case 2: return "Windows 3.x";
480 case 3: return "Windows 9x";
481 case 4: return "Windows NT or derivative";
482 case 5: return "Windows CE";
483 default: return "Unknown";
484 }
485 }
486 return "Unknown";
487}
488#endif
489
490/**
491 * Gets the time reference count for the current VM.
492 *
493 * @returns The time reference count.
494 * @param pVCpu The cross context virtual CPU structure.
495 */
496DECLINLINE(uint64_t) gimHvGetTimeRefCount(PVMCPUCC pVCpu)
497{
498 /* Hyper-V reports the time in 100 ns units (10 MHz). */
499 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
500 PCGIMHV pHv = &pVCpu->CTX_SUFF(pVM)->gim.s.u.Hv;
501 uint64_t const u64Tsc = TMCpuTickGet(pVCpu); /** @todo should we be passing VCPU0 always? */
502 uint64_t const u64TscHz = pHv->cTscTicksPerSecond;
503 uint64_t const u64Tsc100NS = u64TscHz / UINT64_C(10000000); /* 100 ns */
504 uint64_t const uTimeRefCount = (u64Tsc / u64Tsc100NS);
505 return uTimeRefCount;
506}
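/*
 * A worked example of the conversion above, using an assumed TSC frequency: with
 * cTscTicksPerSecond = 2500000000 (2.5 GHz), u64Tsc100NS works out to 250 TSC ticks
 * per 100 ns, so a raw TSC value of 1000000000 yields a reference count of 4000000,
 * i.e. 400 milliseconds of guest time.
 *
 *   uint64_t const uTscHz    = UINT64_C(2500000000);              // assumed TSC frequency
 *   uint64_t const uPer100Ns = uTscHz / UINT64_C(10000000);       // = 250 ticks per 100 ns
 *   uint64_t const uRefCount = UINT64_C(1000000000) / uPer100Ns;  // = 4000000 -> 400 ms
 */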
507
508
509/**
510 * Starts the synthetic timer.
511 *
512 * @param pVCpu The cross context virtual CPU structure.
513 * @param pHvStimer Pointer to the Hyper-V synthetic timer.
514 *
515 * @remarks Caller needs to hold the timer critical section.
516 * @thread Any.
517 */
518VMM_INT_DECL(void) gimHvStartStimer(PVMCPUCC pVCpu, PCGIMHVSTIMER pHvStimer)
519{
520 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
521 TMTIMERHANDLE hTimer = pHvStimer->hTimer;
522 Assert(TMTimerIsLockOwner(pVM, hTimer));
523
524 uint64_t const uTimerCount = pHvStimer->uStimerCountMsr;
525 if (uTimerCount)
526 {
527 uint64_t const uTimerCountNS = uTimerCount * 100;
528
529 /* For periodic timers, 'uTimerCountNS' represents the relative interval. */
530 if (MSR_GIM_HV_STIMER_IS_PERIODIC(pHvStimer->uStimerConfigMsr))
531 {
532 TMTimerSetNano(pVM, hTimer, uTimerCountNS);
533 LogFlow(("GIM%u: HyperV: Started relative periodic STIMER%u with uTimerCountNS=%RU64\n", pVCpu->idCpu,
534 pHvStimer->idxStimer, uTimerCountNS));
535 }
536 else
537 {
538 /* For one-shot timers, 'uTimerCountNS' represents the absolute expiration time relative to the Hyper-V
539 reference time; we convert it to a relative interval and program the timer. */
540 uint64_t const uCurRefTimeNS = gimHvGetTimeRefCount(pVCpu) * 100;
541 if (uTimerCountNS > uCurRefTimeNS)
542 {
543 uint64_t const uRelativeNS = uTimerCountNS - uCurRefTimeNS;
544 TMTimerSetNano(pVM, hTimer, uRelativeNS);
545 LogFlow(("GIM%u: HyperV: Started one-shot relative STIMER%u with uRelativeNS=%RU64\n", pVCpu->idCpu,
546 pHvStimer->idxStimer, uRelativeNS));
547 }
548 }
549 /** @todo frequency hinting? */
550 }
551}
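/*
 * A short worked example for the unit conversion above, using an assumed count: a
 * STIMER count of 100000 is expressed in 100 ns units, i.e. 10000000 ns or 10 ms,
 * which is the interval programmed into the TM timer in the periodic case.
 *
 *   uint64_t const uTimerCount   = UINT64_C(100000);              // 100 ns units
 *   uint64_t const uTimerCountNS = uTimerCount * 100;             // = 10000000 ns
 *   uint64_t const cMilliSecs    = uTimerCountNS / RT_NS_1MS_64;  // = 10 ms
 */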
552
553
554/**
555 * Stops the synthetic timer for the given VCPU.
556 *
557 * @param pVCpu The cross context virtual CPU structure.
558 * @param pHvStimer Pointer to the Hyper-V synthetic timer.
559 *
560 * @remarks Caller needs to hold the timer critical section.
561 * @thread EMT(pVCpu).
562 */
563static void gimHvStopStimer(PVMCPUCC pVCpu, PGIMHVSTIMER pHvStimer)
564{
565 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
566 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
567
568 TMTIMERHANDLE hTimer = pHvStimer->hTimer;
569 Assert(TMTimerIsLockOwner(pVM, hTimer));
570
571 if (TMTimerIsActive(pVM, hTimer))
572 TMTimerStop(pVM, hTimer);
573}
574
575
576/**
577 * MSR read handler for Hyper-V.
578 *
579 * @returns Strict VBox status code like CPUMQueryGuestMsr().
580 * @retval VINF_CPUM_R3_MSR_READ
581 * @retval VERR_CPUM_RAISE_GP_0
582 *
583 * @param pVCpu The cross context virtual CPU structure.
584 * @param idMsr The MSR being read.
585 * @param pRange The range this MSR belongs to.
586 * @param puValue Where to store the MSR value read.
587 *
588 * @thread EMT.
589 */
590VMM_INT_DECL(VBOXSTRICTRC) gimHvReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
591{
592 NOREF(pRange);
593 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
594 PCGIMHV pHv = &pVM->gim.s.u.Hv;
595
596 switch (idMsr)
597 {
598 case MSR_GIM_HV_TIME_REF_COUNT:
599 *puValue = gimHvGetTimeRefCount(pVCpu);
600 return VINF_SUCCESS;
601
602 case MSR_GIM_HV_VP_INDEX:
603 *puValue = pVCpu->idCpu;
604 return VINF_SUCCESS;
605
606 case MSR_GIM_HV_TPR:
607 *puValue = APICHvGetTpr(pVCpu);
608 return VINF_SUCCESS;
609
610 case MSR_GIM_HV_ICR:
611 *puValue = APICHvGetIcr(pVCpu);
612 return VINF_SUCCESS;
613
614 case MSR_GIM_HV_GUEST_OS_ID:
615 *puValue = pHv->u64GuestOsIdMsr;
616 return VINF_SUCCESS;
617
618 case MSR_GIM_HV_HYPERCALL:
619 *puValue = pHv->u64HypercallMsr;
620 return VINF_SUCCESS;
621
622 case MSR_GIM_HV_REF_TSC:
623 *puValue = pHv->u64TscPageMsr;
624 return VINF_SUCCESS;
625
626 case MSR_GIM_HV_TSC_FREQ:
627 *puValue = TMCpuTicksPerSecond(pVM);
628 return VINF_SUCCESS;
629
630 case MSR_GIM_HV_APIC_FREQ:
631 {
632 int rc = APICGetTimerFreq(pVM, puValue);
633 if (RT_FAILURE(rc))
634 return VERR_CPUM_RAISE_GP_0;
635 return VINF_SUCCESS;
636 }
637
638 case MSR_GIM_HV_SYNTH_DEBUG_STATUS:
639 *puValue = pHv->uDbgStatusMsr;
640 return VINF_SUCCESS;
641
642 case MSR_GIM_HV_SINT0: case MSR_GIM_HV_SINT1: case MSR_GIM_HV_SINT2: case MSR_GIM_HV_SINT3:
643 case MSR_GIM_HV_SINT4: case MSR_GIM_HV_SINT5: case MSR_GIM_HV_SINT6: case MSR_GIM_HV_SINT7:
644 case MSR_GIM_HV_SINT8: case MSR_GIM_HV_SINT9: case MSR_GIM_HV_SINT10: case MSR_GIM_HV_SINT11:
645 case MSR_GIM_HV_SINT12: case MSR_GIM_HV_SINT13: case MSR_GIM_HV_SINT14: case MSR_GIM_HV_SINT15:
646 {
647 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
648 *puValue = pHvCpu->auSintMsrs[idMsr - MSR_GIM_HV_SINT0];
649 return VINF_SUCCESS;
650 }
651
652 case MSR_GIM_HV_STIMER0_CONFIG:
653 case MSR_GIM_HV_STIMER1_CONFIG:
654 case MSR_GIM_HV_STIMER2_CONFIG:
655 case MSR_GIM_HV_STIMER3_CONFIG:
656 {
657 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
658 uint8_t const idxStimer = (idMsr - MSR_GIM_HV_STIMER0_CONFIG) >> 1;
659 PCGIMHVSTIMER pcHvStimer = &pHvCpu->aStimers[idxStimer];
660 *puValue = pcHvStimer->uStimerConfigMsr;
661 return VINF_SUCCESS;
662 }
663
664 case MSR_GIM_HV_STIMER0_COUNT:
665 case MSR_GIM_HV_STIMER1_COUNT:
666 case MSR_GIM_HV_STIMER2_COUNT:
667 case MSR_GIM_HV_STIMER3_COUNT:
668 {
669 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
670 uint8_t const idxStimer = (idMsr - MSR_GIM_HV_STIMER0_COUNT) >> 1;
671 PCGIMHVSTIMER pcHvStimer = &pHvCpu->aStimers[idxStimer];
672 *puValue = pcHvStimer->uStimerCountMsr;
673 return VINF_SUCCESS;
674 }
675
676 case MSR_GIM_HV_EOM:
677 {
678 *puValue = 0;
679 return VINF_SUCCESS;
680 }
681
682 case MSR_GIM_HV_SCONTROL:
683 {
684 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
685 *puValue = pHvCpu->uSControlMsr;
686 return VINF_SUCCESS;
687 }
688
689 case MSR_GIM_HV_SIMP:
690 {
691 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
692 *puValue = pHvCpu->uSimpMsr;
693 return VINF_SUCCESS;
694 }
695
696 case MSR_GIM_HV_SVERSION:
697 *puValue = GIM_HV_SVERSION;
698 return VINF_SUCCESS;
699
700 case MSR_GIM_HV_RESET:
701 *puValue = 0;
702 return VINF_SUCCESS;
703
704 case MSR_GIM_HV_CRASH_CTL:
705 *puValue = pHv->uCrashCtlMsr;
706 return VINF_SUCCESS;
707
708 case MSR_GIM_HV_CRASH_P0: *puValue = pHv->uCrashP0Msr; return VINF_SUCCESS;
709 case MSR_GIM_HV_CRASH_P1: *puValue = pHv->uCrashP1Msr; return VINF_SUCCESS;
710 case MSR_GIM_HV_CRASH_P2: *puValue = pHv->uCrashP2Msr; return VINF_SUCCESS;
711 case MSR_GIM_HV_CRASH_P3: *puValue = pHv->uCrashP3Msr; return VINF_SUCCESS;
712 case MSR_GIM_HV_CRASH_P4: *puValue = pHv->uCrashP4Msr; return VINF_SUCCESS;
713
714 case MSR_GIM_HV_DEBUG_OPTIONS_MSR:
715 {
716 if (pHv->fIsVendorMsHv)
717 {
718#ifndef IN_RING3
719 return VINF_CPUM_R3_MSR_READ;
720#else
721 LogRelMax(1, ("GIM: HyperV: Guest querying debug options, suggesting %s interface\n",
722 pHv->fDbgHypercallInterface ? "hypercall" : "MSR"));
723 *puValue = pHv->fDbgHypercallInterface ? GIM_HV_DEBUG_OPTIONS_USE_HYPERCALLS : 0;
724 return VINF_SUCCESS;
725#endif
726 }
727 break;
728 }
729
730 /* Write-only MSRs: */
731 case MSR_GIM_HV_EOI:
732 /* Reserved/unknown MSRs: */
733 default:
734 {
735#ifdef IN_RING3
736 static uint32_t s_cTimes = 0;
737 if (s_cTimes++ < 20)
738 LogRel(("GIM: HyperV: Unknown/invalid RdMsr (%#x) -> #GP(0)\n", idMsr));
739 LogFunc(("Unknown/invalid RdMsr (%#RX32) -> #GP(0)\n", idMsr));
740 break;
741#else
742 return VINF_CPUM_R3_MSR_READ;
743#endif
744 }
745 }
746
747 return VERR_CPUM_RAISE_GP_0;
748}
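/*
 * A hypothetical guest-side sketch of the most common read handled above: polling the
 * partition reference counter, which advances in 100 ns units. The rdmsr64() helper is
 * an assumption and not part of this code base.
 *
 *   uint64_t const uRef0      = rdmsr64(MSR_GIM_HV_TIME_REF_COUNT);
 *   // ... guest does some work ...
 *   uint64_t const cNsElapsed = (rdmsr64(MSR_GIM_HV_TIME_REF_COUNT) - uRef0) * 100;
 */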
749
750
751/**
752 * MSR write handler for Hyper-V.
753 *
754 * @returns Strict VBox status code like CPUMSetGuestMsr().
755 * @retval VINF_CPUM_R3_MSR_WRITE
756 * @retval VERR_CPUM_RAISE_GP_0
757 *
758 * @param pVCpu The cross context virtual CPU structure.
759 * @param idMsr The MSR being written.
760 * @param pRange The range this MSR belongs to.
761 * @param uRawValue The raw value with the ignored bits not masked.
762 *
763 * @thread EMT.
764 */
765VMM_INT_DECL(VBOXSTRICTRC) gimHvWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue)
766{
767 NOREF(pRange);
768 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
769 PGIMHV pHv = &pVM->gim.s.u.Hv;
770
771 switch (idMsr)
772 {
773 case MSR_GIM_HV_TPR:
774 return APICHvSetTpr(pVCpu, uRawValue);
775
776 case MSR_GIM_HV_EOI:
777 return APICHvSetEoi(pVCpu, uRawValue);
778
779 case MSR_GIM_HV_ICR:
780 return APICHvSetIcr(pVCpu, uRawValue);
781
782 case MSR_GIM_HV_GUEST_OS_ID:
783 {
784#ifndef IN_RING3
785 return VINF_CPUM_R3_MSR_WRITE;
786#else
787 /* Disable the hypercall-page and hypercalls if 0 is written to this MSR. */
788 if (!uRawValue)
789 {
790 if (MSR_GIM_HV_HYPERCALL_PAGE_IS_ENABLED(pHv->u64HypercallMsr))
791 {
792 gimR3HvDisableHypercallPage(pVM);
793 pHv->u64HypercallMsr &= ~MSR_GIM_HV_HYPERCALL_PAGE_ENABLE;
794 LogRel(("GIM: HyperV: Hypercall page disabled via Guest OS ID MSR\n"));
795 }
796 }
797 else
798 {
799 LogRel(("GIM: HyperV: Guest OS reported ID %#RX64\n", uRawValue));
800 LogRel(("GIM: HyperV: Open-source=%RTbool Vendor=%#x OS=%#x (%s) Major=%u Minor=%u ServicePack=%u Build=%u\n",
801 MSR_GIM_HV_GUEST_OS_ID_IS_OPENSOURCE(uRawValue), MSR_GIM_HV_GUEST_OS_ID_VENDOR(uRawValue),
802 MSR_GIM_HV_GUEST_OS_ID_OS_VARIANT(uRawValue), gimHvGetGuestOsIdVariantName(uRawValue),
803 MSR_GIM_HV_GUEST_OS_ID_MAJOR_VERSION(uRawValue), MSR_GIM_HV_GUEST_OS_ID_MINOR_VERSION(uRawValue),
804 MSR_GIM_HV_GUEST_OS_ID_SERVICE_VERSION(uRawValue), MSR_GIM_HV_GUEST_OS_ID_BUILD(uRawValue)));
805
806 /* Update the CPUID leaf, see Hyper-V spec. "Microsoft Hypervisor CPUID Leaves". */
807 CPUMCPUIDLEAF HyperLeaf;
808 RT_ZERO(HyperLeaf);
809 HyperLeaf.uLeaf = UINT32_C(0x40000002);
810 HyperLeaf.uEax = MSR_GIM_HV_GUEST_OS_ID_BUILD(uRawValue);
811 HyperLeaf.uEbx = MSR_GIM_HV_GUEST_OS_ID_MINOR_VERSION(uRawValue)
812 | (MSR_GIM_HV_GUEST_OS_ID_MAJOR_VERSION(uRawValue) << 16);
813 HyperLeaf.uEcx = MSR_GIM_HV_GUEST_OS_ID_SERVICE_VERSION(uRawValue);
814 HyperLeaf.uEdx = MSR_GIM_HV_GUEST_OS_ID_SERVICE_VERSION(uRawValue)
815 | (MSR_GIM_HV_GUEST_OS_ID_BUILD(uRawValue) << 24);
816 int rc2 = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
817 AssertRC(rc2);
818 }
819
820 pHv->u64GuestOsIdMsr = uRawValue;
821
822 /*
823 * Update EM on hypercall instruction enabled state.
824 */
825 if (uRawValue)
826 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
827 EMSetHypercallInstructionsEnabled(pVM->CTX_SUFF(apCpus)[idCpu], true);
828 else
829 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
830 EMSetHypercallInstructionsEnabled(pVM->CTX_SUFF(apCpus)[idCpu], false);
831
832 return VINF_SUCCESS;
833#endif /* IN_RING3 */
834 }
835
836 case MSR_GIM_HV_HYPERCALL:
837 {
838#ifndef IN_RING3
839 return VINF_CPUM_R3_MSR_WRITE;
840#else
841 /** @todo There is/was a problem with hypercalls for FreeBSD 10.1 guests,
842 * see @bugref{7270#c116}. */
843 /* First, update all but the hypercall page enable bit. */
844 pHv->u64HypercallMsr = (uRawValue & ~MSR_GIM_HV_HYPERCALL_PAGE_ENABLE);
845
846 /* Hypercall page can only be enabled when the guest has enabled hypercalls. */
847 bool fEnable = MSR_GIM_HV_HYPERCALL_PAGE_IS_ENABLED(uRawValue);
848 if ( fEnable
849 && !gimHvAreHypercallsEnabled(pVM))
850 {
851 return VINF_SUCCESS;
852 }
853
854 /* Is the guest disabling the hypercall-page? Allow it regardless of the Guest-OS Id Msr. */
855 if (!fEnable)
856 {
857 gimR3HvDisableHypercallPage(pVM);
858 pHv->u64HypercallMsr = uRawValue;
859 return VINF_SUCCESS;
860 }
861
862 /* Enable the hypercall-page. */
863 RTGCPHYS GCPhysHypercallPage = MSR_GIM_HV_HYPERCALL_GUEST_PFN(uRawValue) << GUEST_PAGE_SHIFT;
864 int rc = gimR3HvEnableHypercallPage(pVM, GCPhysHypercallPage);
865 if (RT_SUCCESS(rc))
866 {
867 pHv->u64HypercallMsr = uRawValue;
868 return VINF_SUCCESS;
869 }
870
871 return VERR_CPUM_RAISE_GP_0;
872#endif
873 }
874
875 case MSR_GIM_HV_REF_TSC:
876 {
877#ifndef IN_RING3
878 return VINF_CPUM_R3_MSR_WRITE;
879#else /* IN_RING3 */
880 /* First, update all but the TSC page enable bit. */
881 pHv->u64TscPageMsr = (uRawValue & ~MSR_GIM_HV_REF_TSC_ENABLE);
882
883 /* Is the guest disabling the TSC page? */
884 bool fEnable = MSR_GIM_HV_REF_TSC_IS_ENABLED(uRawValue);
885 if (!fEnable)
886 {
887 gimR3HvDisableTscPage(pVM);
888 pHv->u64TscPageMsr = uRawValue;
889 return VINF_SUCCESS;
890 }
891
892 /* Enable the TSC page. */
893 RTGCPHYS GCPhysTscPage = MSR_GIM_HV_REF_TSC_GUEST_PFN(uRawValue) << GUEST_PAGE_SHIFT;
894 int rc = gimR3HvEnableTscPage(pVM, GCPhysTscPage, false /* fUseThisTscSequence */, 0 /* uTscSequence */);
895 if (RT_SUCCESS(rc))
896 {
897 pHv->u64TscPageMsr = uRawValue;
898 return VINF_SUCCESS;
899 }
900
901 return VERR_CPUM_RAISE_GP_0;
902#endif /* IN_RING3 */
903 }
904
905 case MSR_GIM_HV_APIC_ASSIST_PAGE:
906 {
907#ifndef IN_RING3
908 return VINF_CPUM_R3_MSR_WRITE;
909#else /* IN_RING3 */
910 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
911 pHvCpu->uApicAssistPageMsr = uRawValue;
912
913 if (MSR_GIM_HV_APICASSIST_PAGE_IS_ENABLED(uRawValue))
914 {
915 RTGCPHYS GCPhysApicAssistPage = MSR_GIM_HV_APICASSIST_GUEST_PFN(uRawValue) << GUEST_PAGE_SHIFT;
916 if (PGMPhysIsGCPhysNormal(pVM, GCPhysApicAssistPage))
917 {
918 int rc = gimR3HvEnableApicAssistPage(pVCpu, GCPhysApicAssistPage);
919 if (RT_SUCCESS(rc))
920 {
921 pHvCpu->uApicAssistPageMsr = uRawValue;
922 return VINF_SUCCESS;
923 }
924 }
925 else
926 {
927 LogRelMax(5, ("GIM%u: HyperV: APIC-assist page address %#RGp invalid!\n", pVCpu->idCpu,
928 GCPhysApicAssistPage));
929 }
930 }
931 else
932 gimR3HvDisableApicAssistPage(pVCpu);
933
934 return VERR_CPUM_RAISE_GP_0;
935#endif /* IN_RING3 */
936 }
937
938 case MSR_GIM_HV_RESET:
939 {
940#ifndef IN_RING3
941 return VINF_CPUM_R3_MSR_WRITE;
942#else
943 if (MSR_GIM_HV_RESET_IS_ENABLED(uRawValue))
944 {
945 LogRel(("GIM: HyperV: Reset initiated through MSR\n"));
946 int rc = PDMDevHlpVMReset(pVM->gim.s.pDevInsR3, PDMVMRESET_F_GIM);
947 AssertRC(rc); /* Note! Not allowed to return VINF_EM_RESET / VINF_EM_HALT here, so ignore them. */
948 }
949 /* else: Ignore writes to other bits. */
950 return VINF_SUCCESS;
951#endif /* IN_RING3 */
952 }
953
954 case MSR_GIM_HV_CRASH_CTL:
955 {
956#ifndef IN_RING3
957 return VINF_CPUM_R3_MSR_WRITE;
958#else
959 if (uRawValue & MSR_GIM_HV_CRASH_CTL_NOTIFY)
960 {
961 LogRel(("GIM: HyperV: Guest indicates a fatal condition! P0=%#RX64 P1=%#RX64 P2=%#RX64 P3=%#RX64 P4=%#RX64\n",
962 pHv->uCrashP0Msr, pHv->uCrashP1Msr, pHv->uCrashP2Msr, pHv->uCrashP3Msr, pHv->uCrashP4Msr));
963 DBGFR3ReportBugCheck(pVM, pVCpu, DBGFEVENT_BSOD_MSR, pHv->uCrashP0Msr, pHv->uCrashP1Msr,
964 pHv->uCrashP2Msr, pHv->uCrashP3Msr, pHv->uCrashP4Msr);
965 /* (Do not try to pass VINF_EM_DBG_EVENT, it doesn't work from here!) */
966 }
967 return VINF_SUCCESS;
968#endif
969 }
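/*
 * A hypothetical guest-side sketch of the crash reporting sequence handled above: the
 * guest fills the five parameter MSRs and then sets the notify bit in the control MSR.
 * The wrmsr() helper and the uBugCheckCode/uParamN values are assumptions; only the MSR
 * names come from this code base.
 *
 *   wrmsr(MSR_GIM_HV_CRASH_P0, uBugCheckCode);
 *   wrmsr(MSR_GIM_HV_CRASH_P1, uParam1);
 *   wrmsr(MSR_GIM_HV_CRASH_P2, uParam2);
 *   wrmsr(MSR_GIM_HV_CRASH_P3, uParam3);
 *   wrmsr(MSR_GIM_HV_CRASH_P4, uParam4);
 *   wrmsr(MSR_GIM_HV_CRASH_CTL, MSR_GIM_HV_CRASH_CTL_NOTIFY);
 */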
970
971 case MSR_GIM_HV_SYNTH_DEBUG_SEND_BUFFER:
972 {
973 if (!pHv->fDbgEnabled)
974 return VERR_CPUM_RAISE_GP_0;
975#ifndef IN_RING3
976 return VINF_CPUM_R3_MSR_WRITE;
977#else
978 RTGCPHYS GCPhysBuffer = (RTGCPHYS)uRawValue;
979 pHv->uDbgSendBufferMsr = GCPhysBuffer;
980 if (PGMPhysIsGCPhysNormal(pVM, GCPhysBuffer))
981 LogRel(("GIM: HyperV: Set up debug send buffer at %#RGp\n", GCPhysBuffer));
982 else
983 LogRel(("GIM: HyperV: Destroyed debug send buffer\n"));
984 pHv->uDbgSendBufferMsr = uRawValue;
985 return VINF_SUCCESS;
986#endif
987 }
988
989 case MSR_GIM_HV_SYNTH_DEBUG_RECEIVE_BUFFER:
990 {
991 if (!pHv->fDbgEnabled)
992 return VERR_CPUM_RAISE_GP_0;
993#ifndef IN_RING3
994 return VINF_CPUM_R3_MSR_WRITE;
995#else
996 RTGCPHYS GCPhysBuffer = (RTGCPHYS)uRawValue;
997 pHv->uDbgRecvBufferMsr = GCPhysBuffer;
998 if (PGMPhysIsGCPhysNormal(pVM, GCPhysBuffer))
999 LogRel(("GIM: HyperV: Set up debug receive buffer at %#RGp\n", GCPhysBuffer));
1000 else
1001 LogRel(("GIM: HyperV: Destroyed debug receive buffer\n"));
1002 return VINF_SUCCESS;
1003#endif
1004 }
1005
1006 case MSR_GIM_HV_SYNTH_DEBUG_PENDING_BUFFER:
1007 {
1008 if (!pHv->fDbgEnabled)
1009 return VERR_CPUM_RAISE_GP_0;
1010#ifndef IN_RING3
1011 return VINF_CPUM_R3_MSR_WRITE;
1012#else
1013 RTGCPHYS GCPhysBuffer = (RTGCPHYS)uRawValue;
1014 pHv->uDbgPendingBufferMsr = GCPhysBuffer;
1015 if (PGMPhysIsGCPhysNormal(pVM, GCPhysBuffer))
1016 LogRel(("GIM: HyperV: Set up debug pending buffer at %#RGp\n", uRawValue));
1017 else
1018 LogRel(("GIM: HyperV: Destroyed debug pending buffer\n"));
1019 return VINF_SUCCESS;
1020#endif
1021 }
1022
1023 case MSR_GIM_HV_SYNTH_DEBUG_CONTROL:
1024 {
1025 if (!pHv->fDbgEnabled)
1026 return VERR_CPUM_RAISE_GP_0;
1027#ifndef IN_RING3
1028 return VINF_CPUM_R3_MSR_WRITE;
1029#else
1030 if ( MSR_GIM_HV_SYNTH_DEBUG_CONTROL_IS_WRITE(uRawValue)
1031 && MSR_GIM_HV_SYNTH_DEBUG_CONTROL_IS_READ(uRawValue))
1032 {
1033 LogRel(("GIM: HyperV: Requesting both read and write through debug control MSR -> #GP(0)\n"));
1034 return VERR_CPUM_RAISE_GP_0;
1035 }
1036
1037 if (MSR_GIM_HV_SYNTH_DEBUG_CONTROL_IS_WRITE(uRawValue))
1038 {
1039 uint32_t cbWrite = MSR_GIM_HV_SYNTH_DEBUG_CONTROL_W_LEN(uRawValue);
1040 if ( cbWrite > 0
1041 && cbWrite < GIM_HV_PAGE_SIZE)
1042 {
1043 if (PGMPhysIsGCPhysNormal(pVM, (RTGCPHYS)pHv->uDbgSendBufferMsr))
1044 {
1045 Assert(pHv->pvDbgBuffer);
1046 int rc = PGMPhysSimpleReadGCPhys(pVM, pHv->pvDbgBuffer, (RTGCPHYS)pHv->uDbgSendBufferMsr, cbWrite);
1047 if (RT_SUCCESS(rc))
1048 {
1049 LogRelMax(1, ("GIM: HyperV: Initiated debug data transmission via MSR\n"));
1050 uint32_t cbWritten = 0;
1051 rc = gimR3HvDebugWrite(pVM, pHv->pvDbgBuffer, cbWrite, &cbWritten, false /*fUdpPkt*/);
1052 if ( RT_SUCCESS(rc)
1053 && cbWrite == cbWritten)
1054 pHv->uDbgStatusMsr = MSR_GIM_HV_SYNTH_DEBUG_STATUS_W_SUCCESS;
1055 else
1056 pHv->uDbgStatusMsr = 0;
1057 }
1058 else
1059 LogRelMax(5, ("GIM: HyperV: Failed to read debug send buffer at %#RGp, rc=%Rrc\n",
1060 (RTGCPHYS)pHv->uDbgSendBufferMsr, rc));
1061 }
1062 else
1063 LogRelMax(5, ("GIM: HyperV: Debug send buffer address %#RGp invalid! Ignoring debug write!\n",
1064 (RTGCPHYS)pHv->uDbgSendBufferMsr));
1065 }
1066 else
1067 LogRelMax(5, ("GIM: HyperV: Invalid write size %u specified in MSR, ignoring debug write!\n",
1068 MSR_GIM_HV_SYNTH_DEBUG_CONTROL_W_LEN(uRawValue)));
1069 }
1070 else if (MSR_GIM_HV_SYNTH_DEBUG_CONTROL_IS_READ(uRawValue))
1071 {
1072 if (PGMPhysIsGCPhysNormal(pVM, (RTGCPHYS)pHv->uDbgRecvBufferMsr))
1073 {
1074 LogRelMax(1, ("GIM: HyperV: Initiated debug data reception via MSR\n"));
1075 uint32_t cbReallyRead;
1076 Assert(pHv->pvDbgBuffer);
1077 int rc = gimR3HvDebugRead(pVM, pHv->pvDbgBuffer, GIM_HV_PAGE_SIZE, GIM_HV_PAGE_SIZE,
1078 &cbReallyRead, 0, false /*fUdpPkt*/);
1079 if ( RT_SUCCESS(rc)
1080 && cbReallyRead > 0)
1081 {
1082 rc = PGMPhysSimpleWriteGCPhys(pVM, (RTGCPHYS)pHv->uDbgRecvBufferMsr, pHv->pvDbgBuffer, cbReallyRead);
1083 if (RT_SUCCESS(rc))
1084 {
1085 pHv->uDbgStatusMsr = ((uint16_t)cbReallyRead) << 16;
1086 pHv->uDbgStatusMsr |= MSR_GIM_HV_SYNTH_DEBUG_STATUS_R_SUCCESS;
1087 }
1088 else
1089 {
1090 pHv->uDbgStatusMsr = 0;
1091 LogRelMax(5, ("GIM: HyperV: PGMPhysSimpleWriteGCPhys failed. rc=%Rrc\n", rc));
1092 }
1093 }
1094 else
1095 pHv->uDbgStatusMsr = 0;
1096 }
1097 else
1098 {
1099 LogRelMax(5, ("GIM: HyperV: Debug receive buffer address %#RGp invalid! Ignoring debug read!\n",
1100 (RTGCPHYS)pHv->uDbgRecvBufferMsr));
1101 }
1102 }
1103 return VINF_SUCCESS;
1104#endif
1105 }
1106
1107 case MSR_GIM_HV_SINT0: case MSR_GIM_HV_SINT1: case MSR_GIM_HV_SINT2: case MSR_GIM_HV_SINT3:
1108 case MSR_GIM_HV_SINT4: case MSR_GIM_HV_SINT5: case MSR_GIM_HV_SINT6: case MSR_GIM_HV_SINT7:
1109 case MSR_GIM_HV_SINT8: case MSR_GIM_HV_SINT9: case MSR_GIM_HV_SINT10: case MSR_GIM_HV_SINT11:
1110 case MSR_GIM_HV_SINT12: case MSR_GIM_HV_SINT13: case MSR_GIM_HV_SINT14: case MSR_GIM_HV_SINT15:
1111 {
1112 uint8_t uVector = MSR_GIM_HV_SINT_GET_VECTOR(uRawValue);
1113 size_t const idxSintMsr = idMsr - MSR_GIM_HV_SINT0;
1114 bool const fVMBusMsg = RT_BOOL(idxSintMsr == GIM_HV_VMBUS_MSG_SINT);
1115 const char *pszDesc = fVMBusMsg ? "VMBus Message" : "Generic";
1116 if (uVector < GIM_HV_SINT_VECTOR_VALID_MIN)
1117 {
1118 LogRel(("GIM%u: HyperV: Programmed an invalid vector in SINT%u (%s), uVector=%u -> #GP(0)\n", pVCpu->idCpu,
1119 idxSintMsr, pszDesc, uVector));
1120 return VERR_CPUM_RAISE_GP_0;
1121 }
1122
1123 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1124 pHvCpu->auSintMsrs[idxSintMsr] = uRawValue;
1125 if (fVMBusMsg)
1126 {
1127 if (MSR_GIM_HV_SINT_IS_MASKED(uRawValue))
1128 Log(("GIM%u: HyperV: Masked SINT%u (%s)\n", pVCpu->idCpu, idxSintMsr, pszDesc));
1129 else
1130 Log(("GIM%u: HyperV: Unmasked SINT%u (%s), uVector=%u\n", pVCpu->idCpu, idxSintMsr, pszDesc, uVector));
1131 }
1132 Log(("GIM%u: HyperV: Written SINT%u=%#RX64\n", pVCpu->idCpu, idxSintMsr, uRawValue));
1133 return VINF_SUCCESS;
1134 }
1135
1136 case MSR_GIM_HV_SCONTROL:
1137 {
1138#ifndef IN_RING3
1139 /** @todo make this RZ later? */
1140 return VINF_CPUM_R3_MSR_WRITE;
1141#else
1142 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1143 pHvCpu->uSControlMsr = uRawValue;
1144 if (MSR_GIM_HV_SCONTROL_IS_ENABLED(uRawValue))
1145 LogRel(("GIM%u: HyperV: Synthetic interrupt control enabled\n", pVCpu->idCpu));
1146 else
1147 LogRel(("GIM%u: HyperV: Synthetic interrupt control disabled\n", pVCpu->idCpu));
1148 return VINF_SUCCESS;
1149#endif
1150 }
1151
1152 case MSR_GIM_HV_STIMER0_CONFIG:
1153 case MSR_GIM_HV_STIMER1_CONFIG:
1154 case MSR_GIM_HV_STIMER2_CONFIG:
1155 case MSR_GIM_HV_STIMER3_CONFIG:
1156 {
1157 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1158 uint8_t const idxStimer = (idMsr - MSR_GIM_HV_STIMER0_CONFIG) >> 1;
1159
1160 /* Validate the writable bits. */
1161 if (RT_LIKELY(!(uRawValue & ~MSR_GIM_HV_STIMER_RW_VALID)))
1162 {
1163 Assert(idxStimer < RT_ELEMENTS(pHvCpu->aStimers));
1164 PGIMHVSTIMER pHvStimer = &pHvCpu->aStimers[idxStimer];
1165
1166 /* Lock to prevent concurrent access from the timer callback. */
1167 int rc = TMTimerLock(pVM, pHvStimer->hTimer, VERR_IGNORED);
1168 if (rc == VINF_SUCCESS)
1169 {
1170 /* Update the MSR value. */
1171 pHvStimer->uStimerConfigMsr = uRawValue;
1172 Log(("GIM%u: HyperV: Set STIMER_CONFIG%u=%#RX64\n", pVCpu->idCpu, idxStimer, uRawValue));
1173
1174 /* Process the MSR bits. */
1175 if ( !MSR_GIM_HV_STIMER_GET_SINTX(uRawValue) /* Writing SINTx as 0 causes the timer to be disabled. */
1176 || !MSR_GIM_HV_STIMER_IS_ENABLED(uRawValue))
1177 {
1178 pHvStimer->uStimerConfigMsr &= ~MSR_GIM_HV_STIMER_ENABLE;
1179 gimHvStopStimer(pVCpu, pHvStimer);
1180 Log(("GIM%u: HyperV: Disabled STIMER_CONFIG%u\n", pVCpu->idCpu, idxStimer));
1181 }
1182 else if (MSR_GIM_HV_STIMER_IS_ENABLED(uRawValue))
1183 {
1184 /* Auto-enable implies writing to the STIMERx_COUNT MSR is what starts the timer. */
1185 if (!MSR_GIM_HV_STIMER_IS_AUTO_ENABLED(uRawValue))
1186 {
1187 if (!TMTimerIsActive(pVM, pHvStimer->hTimer))
1188 {
1189 gimHvStartStimer(pVCpu, pHvStimer);
1190 Log(("GIM%u: HyperV: Started STIMER%u\n", pVCpu->idCpu, idxStimer));
1191 }
1192 else
1193 {
1194 /*
1195 * Enabling a timer that's already enabled is undefined behaviour,
1196 * see Hyper-V spec. 15.3.1 "Synthetic Timer Configuration Register".
1197 *
1198 * Our implementation simply restarts the timer. Guests that conform to
1199 * the Hyper-V spec should not be doing this anyway.
1200 */
1201 AssertFailed();
1202 gimHvStopStimer(pVCpu, pHvStimer);
1203 gimHvStartStimer(pVCpu, pHvStimer);
1204 }
1205 }
1206 }
1207
1208 TMTimerUnlock(pVM, pHvStimer->hTimer);
1209 }
1210 return rc;
1211 }
1212#ifndef IN_RING3
1213 return VINF_CPUM_R3_MSR_WRITE;
1214#else
1215 LogRel(("GIM%u: HyperV: Setting reserved bits of STIMER%u MSR (uRawValue=%#RX64) -> #GP(0)\n", pVCpu->idCpu,
1216 idxStimer, uRawValue));
1217 return VERR_CPUM_RAISE_GP_0;
1218#endif
1219 }
1220
1221 case MSR_GIM_HV_STIMER0_COUNT:
1222 case MSR_GIM_HV_STIMER1_COUNT:
1223 case MSR_GIM_HV_STIMER2_COUNT:
1224 case MSR_GIM_HV_STIMER3_COUNT:
1225 {
1226 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1227 uint8_t const idxStimer = (idMsr - MSR_GIM_HV_STIMER0_CONFIG) >> 1;
1228 Assert(idxStimer < RT_ELEMENTS(pHvCpu->aStimers));
1229 PGIMHVSTIMER pHvStimer = &pHvCpu->aStimers[idxStimer];
1230 int const rcBusy = VINF_CPUM_R3_MSR_WRITE;
1231
1232 /*
1233 * Writing zero to this MSR disables the timer regardless of whether the auto-enable
1234 * flag is set in the config MSR corresponding to the timer.
1235 */
1236 if (!uRawValue)
1237 {
1238 gimHvStopStimer(pVCpu, pHvStimer);
1239 pHvStimer->uStimerCountMsr = 0;
1240 Log(("GIM%u: HyperV: Set STIMER_COUNT%u=%RU64, stopped timer\n", pVCpu->idCpu, idxStimer, uRawValue));
1241 return VINF_SUCCESS;
1242 }
1243
1244 /*
1245 * Concurrent writes to the config MSR can't happen as they are serialized by
1246 * virtue of being done on the same EMT as this.
1247 */
1248 if (MSR_GIM_HV_STIMER_IS_AUTO_ENABLED(pHvStimer->uStimerConfigMsr))
1249 {
1250 int rc = TMTimerLock(pVM, pHvStimer->hTimer, rcBusy);
1251 if (rc == VINF_SUCCESS)
1252 {
1253 pHvStimer->uStimerCountMsr = uRawValue;
1254 gimHvStartStimer(pVCpu, pHvStimer);
1255 TMTimerUnlock(pVM, pHvStimer->hTimer);
1256 Log(("GIM%u: HyperV: Set STIMER_COUNT%u=%RU64 %RU64 msec, auto-started timer\n", pVCpu->idCpu, idxStimer,
1257 uRawValue, (uRawValue * 100) / RT_NS_1MS_64));
1258 }
1259 return rc;
1260 }
1261
1262 /* Simple update of the counter without any timer start/stop side-effects. */
1263 pHvStimer->uStimerCountMsr = uRawValue;
1264 Log(("GIM%u: HyperV: Set STIMER_COUNT%u=%RU64\n", pVCpu->idCpu, idxStimer, uRawValue));
1265 return VINF_SUCCESS;
1266 }
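/*
 * A hypothetical guest-side sketch of the auto-enable path handled above: once the
 * config MSR has been written with the auto-enable flag, a valid SINTx and (optionally)
 * the periodic bit, writing a non-zero count starts the timer and writing zero stops it.
 * The wrmsr() helper and the fConfig value are assumptions; see the MSR_GIM_HV_STIMER_*
 * macros used above for the actual bit layout.
 *
 *   wrmsr(MSR_GIM_HV_STIMER0_CONFIG, fConfig);          // auto-enable + SINTx (+ periodic)
 *   wrmsr(MSR_GIM_HV_STIMER0_COUNT, UINT64_C(100000));  // 10 ms in 100 ns units, starts the timer
 *   wrmsr(MSR_GIM_HV_STIMER0_COUNT, 0);                 // writing zero stops it again
 */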
1267
1268 case MSR_GIM_HV_EOM:
1269 {
1270 /** @todo implement EOM. */
1271 Log(("GIM%u: HyperV: EOM\n", pVCpu->idCpu));
1272 return VINF_SUCCESS;
1273 }
1274
1275 case MSR_GIM_HV_SIEFP:
1276 {
1277#ifndef IN_RING3
1278 return VINF_CPUM_R3_MSR_WRITE;
1279#else
1280 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1281 pHvCpu->uSiefpMsr = uRawValue;
1282 if (MSR_GIM_HV_SIEF_PAGE_IS_ENABLED(uRawValue))
1283 {
1284 RTGCPHYS GCPhysSiefPage = MSR_GIM_HV_SIEF_GUEST_PFN(uRawValue) << GUEST_PAGE_SHIFT;
1285 if (PGMPhysIsGCPhysNormal(pVM, GCPhysSiefPage))
1286 {
1287 int rc = gimR3HvEnableSiefPage(pVCpu, GCPhysSiefPage);
1288 if (RT_SUCCESS(rc))
1289 {
1290 LogRel(("GIM%u: HyperV: Enabled synthetic interrupt event flags page at %#RGp\n", pVCpu->idCpu,
1291 GCPhysSiefPage));
1292 /** @todo SIEF setup. */
1293 return VINF_SUCCESS;
1294 }
1295 }
1296 else
1297 LogRelMax(5, ("GIM%u: HyperV: SIEF page address %#RGp invalid!\n", pVCpu->idCpu, GCPhysSiefPage));
1298 }
1299 else
1300 gimR3HvDisableSiefPage(pVCpu);
1301
1302 return VERR_CPUM_RAISE_GP_0;
1303#endif
1304 break;
1305 }
1306
1307 case MSR_GIM_HV_SIMP:
1308 {
1309#ifndef IN_RING3
1310 return VINF_CPUM_R3_MSR_WRITE;
1311#else
1312 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1313 pHvCpu->uSimpMsr = uRawValue;
1314 if (MSR_GIM_HV_SIMP_IS_ENABLED(uRawValue))
1315 {
1316 RTGCPHYS GCPhysSimp = MSR_GIM_HV_SIMP_GPA(uRawValue);
1317 if (PGMPhysIsGCPhysNormal(pVM, GCPhysSimp))
1318 {
1319 uint8_t abSimp[GIM_HV_PAGE_SIZE];
1320 RT_ZERO(abSimp);
1321 int rc2 = PGMPhysSimpleWriteGCPhys(pVM, GCPhysSimp, &abSimp[0], sizeof(abSimp));
1322 if (RT_SUCCESS(rc2))
1323 LogRel(("GIM%u: HyperV: Enabled synthetic interrupt message page at %#RGp\n", pVCpu->idCpu, GCPhysSimp));
1324 else
1325 {
1326 LogRel(("GIM%u: HyperV: Failed to update synthetic interrupt message page at %#RGp. uSimpMsr=%#RX64 rc=%Rrc\n",
1327 pVCpu->idCpu, GCPhysSimp, pHvCpu->uSimpMsr, rc2));
1328 return VERR_CPUM_RAISE_GP_0;
1329 }
1330 }
1331 else
1332 {
1333 LogRel(("GIM%u: HyperV: Enabled synthetic interrupt message page at invalid address %#RGp\n", pVCpu->idCpu,
1334 GCPhysSimp));
1335 }
1336 }
1337 else
1338 LogRel(("GIM%u: HyperV: Disabled synthetic interrupt message page\n", pVCpu->idCpu));
1339 return VINF_SUCCESS;
1340#endif
1341 }
1342
1343 case MSR_GIM_HV_CRASH_P0: pHv->uCrashP0Msr = uRawValue; return VINF_SUCCESS;
1344 case MSR_GIM_HV_CRASH_P1: pHv->uCrashP1Msr = uRawValue; return VINF_SUCCESS;
1345 case MSR_GIM_HV_CRASH_P2: pHv->uCrashP2Msr = uRawValue; return VINF_SUCCESS;
1346 case MSR_GIM_HV_CRASH_P3: pHv->uCrashP3Msr = uRawValue; return VINF_SUCCESS;
1347 case MSR_GIM_HV_CRASH_P4: pHv->uCrashP4Msr = uRawValue; return VINF_SUCCESS;
1348
1349 case MSR_GIM_HV_TIME_REF_COUNT: /* Read-only MSRs. */
1350 case MSR_GIM_HV_VP_INDEX:
1351 case MSR_GIM_HV_TSC_FREQ:
1352 case MSR_GIM_HV_APIC_FREQ:
1353 LogFunc(("WrMsr on read-only MSR %#RX32 -> #GP(0)\n", idMsr));
1354 break;
1355
1356 case MSR_GIM_HV_DEBUG_OPTIONS_MSR:
1357 {
1358 if (pHv->fIsVendorMsHv)
1359 {
1360#ifndef IN_RING3
1361 return VINF_CPUM_R3_MSR_WRITE;
1362#else
1363 LogRelMax(5, ("GIM: HyperV: Write debug options MSR with %#RX64 ignored\n", uRawValue));
1364 return VINF_SUCCESS;
1365#endif
1366 }
1367 return VERR_CPUM_RAISE_GP_0;
1368 }
1369
1370 default:
1371 {
1372#ifdef IN_RING3
1373 static uint32_t s_cTimes = 0;
1374 if (s_cTimes++ < 20)
1375 LogRel(("GIM: HyperV: Unknown/invalid WrMsr (%#x,%#x`%08x) -> #GP(0)\n", idMsr,
1376 uRawValue >> 32, uRawValue & UINT64_C(0xffffffff)));
1377 LogFunc(("Unknown/invalid WrMsr (%#RX32,%#RX64) -> #GP(0)\n", idMsr, uRawValue));
1378 break;
1379#else
1380 return VINF_CPUM_R3_MSR_WRITE;
1381#endif
1382 }
1383 }
1384
1385 return VERR_CPUM_RAISE_GP_0;
1386}
1387
1388
1389/**
1390 * Whether we need to trap \#UD exceptions in the guest.
1391 *
1392 * We only needed to trap \#UD exceptions for the old raw-mode guests when
1393 * hypercalls are enabled. For HM VMs, the hypercall would be handled via the
1394 * VMCALL/VMMCALL VM-exit.
1395 *
1396 * @param pVCpu The cross context virtual CPU structure.
1397 */
1398VMM_INT_DECL(bool) gimHvShouldTrapXcptUD(PVMCPU pVCpu)
1399{
1400 RT_NOREF(pVCpu);
1401 return false;
1402}
1403
1404
1405/**
1406 * Checks the instruction and executes the hypercall if it's a valid hypercall
1407 * instruction.
1408 *
1409 * This interface is used by \#UD handlers and IEM.
1410 *
1411 * @returns Strict VBox status code.
1412 * @param pVCpu The cross context virtual CPU structure.
1413 * @param pCtx Pointer to the guest-CPU context.
1414 * @param uDisOpcode The disassembler opcode.
1415 * @param cbInstr The instruction length.
1416 *
1417 * @thread EMT(pVCpu).
1418 */
1419VMM_INT_DECL(VBOXSTRICTRC) gimHvHypercallEx(PVMCPUCC pVCpu, PCPUMCTX pCtx, unsigned uDisOpcode, uint8_t cbInstr)
1420{
1421 Assert(pVCpu);
1422 Assert(pCtx);
1423 VMCPU_ASSERT_EMT(pVCpu);
1424
1425 PVM pVM = pVCpu->CTX_SUFF(pVM);
1426 CPUMCPUVENDOR const enmGuestCpuVendor = (CPUMCPUVENDOR)pVM->cpum.ro.GuestFeatures.enmCpuVendor;
1427 if ( ( uDisOpcode == OP_VMCALL
1428 && ( enmGuestCpuVendor == CPUMCPUVENDOR_INTEL
1429 || enmGuestCpuVendor == CPUMCPUVENDOR_VIA
1430 || enmGuestCpuVendor == CPUMCPUVENDOR_SHANGHAI))
1431 || ( uDisOpcode == OP_VMMCALL
1432 && ( enmGuestCpuVendor == CPUMCPUVENDOR_AMD
1433 || enmGuestCpuVendor == CPUMCPUVENDOR_HYGON)) )
1434 return gimHvHypercall(pVCpu, pCtx);
1435
1436 RT_NOREF_PV(cbInstr);
1437 return VERR_GIM_INVALID_HYPERCALL_INSTR;
1438}
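/*
 * An illustrative note on the vendor check above: Intel/VIA/Shanghai guests are expected
 * to use VMCALL and AMD/Hygon guests VMMCALL, so a guest-side hypercall stub would pick
 * the instruction by CPU vendor. The array names below are illustrative only.
 *
 *   static uint8_t const g_abVmcall[]  = { 0x0f, 0x01, 0xc1 };   // VMCALL  - Intel/VIA/Shanghai
 *   static uint8_t const g_abVmmcall[] = { 0x0f, 0x01, 0xd9 };   // VMMCALL - AMD/Hygon
 */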
1439
1440
1441/**
1442 * Exception handler for \#UD.
1443 *
1444 * @returns Strict VBox status code.
1445 * @retval VINF_SUCCESS if the hypercall succeeded (even if its operation
1446 * failed).
1447 * @retval VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
1448 * @retval VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
1449 * RIP.
1450 * @retval VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
1451 * @retval VERR_GIM_INVALID_HYPERCALL_INSTR instruction at RIP is not a valid
1452 * hypercall instruction.
1453 *
1454 * @param pVCpu The cross context virtual CPU structure.
1455 * @param pCtx Pointer to the guest-CPU context.
1456 * @param pDis Pointer to the disassembled instruction state at RIP.
1457 * Optional, can be NULL.
1458 * @param pcbInstr Where to store the instruction length of the hypercall
1459 * instruction. Optional, can be NULL.
1460 *
1461 * @thread EMT(pVCpu).
1462 */
1463VMM_INT_DECL(VBOXSTRICTRC) gimHvXcptUD(PVMCPUCC pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr)
1464{
1465 VMCPU_ASSERT_EMT(pVCpu);
1466
1467 /*
1468 * If we didn't ask for #UD to be trapped, bail.
1469 */
1470 if (!gimHvShouldTrapXcptUD(pVCpu))
1471 return VERR_GIM_IPE_1;
1472
1473 if (!pDis)
1474 {
1475 /*
1476 * Disassemble the instruction at RIP to figure out if it's the Intel VMCALL instruction
1477 * or the AMD VMMCALL instruction and if so, handle it as a hypercall.
1478 */
1479 unsigned cbInstr;
1480 DISCPUSTATE Dis;
1481 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, &Dis, &cbInstr);
1482 if (RT_SUCCESS(rc))
1483 {
1484 if (pcbInstr)
1485 *pcbInstr = (uint8_t)cbInstr;
1486 return gimHvHypercallEx(pVCpu, pCtx, Dis.pCurInstr->uOpcode, Dis.cbInstr);
1487 }
1488
1489 Log(("GIM: HyperV: Failed to disassemble instruction at CS:RIP=%04x:%08RX64. rc=%Rrc\n", pCtx->cs.Sel, pCtx->rip, rc));
1490 return rc;
1491 }
1492
1493 return gimHvHypercallEx(pVCpu, pCtx, pDis->pCurInstr->uOpcode, pDis->cbInstr);
1494}
1495