VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@99576

Last change on this file since 99576 was 99576, checked in by vboxsync, 2 years ago

VMM: Preparations for getting interrupts injected into the guest. With ARMv8 there are two types of interrupts (normal interrupts and fast interrupts) which need to be mapped to forced action flags. Because the PIC and APIC flags are not needed, those are mapped to IRQs and FIQs on ARM respectively. bugref:10389

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 61.0 KB
/* $Id: NEMR3Native-darwin-armv8.cpp 99576 2023-05-03 10:24:27Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
 *
 * Log group 2: Exit logging.
 * Log group 3: Log context on exit.
 * Log group 5: Ring-3 memory management
 */

/*
 * Copyright (C) 2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#define VMCPU_INCL_CPUM_GST_CTX
#define CPUM_WITH_NONCONST_HOST_FEATURES /* required for initializing parts of the g_CpumHostFeatures structure here. */
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/gcm.h>
#include "NEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "dtrace/VBoxVMM.h"

#include <iprt/armv8.h>
#include <iprt/asm.h>
#include <iprt/ldr.h>
#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/string.h>
#include <iprt/system.h>
#include <iprt/utf16.h>

#include <mach/mach_time.h>
#include <mach/kern_return.h>

#include <Hypervisor/Hypervisor.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** NEM_DARWIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
/** The general registers. */
static const struct
{
    hv_reg_t    enmHvReg;
    uint32_t    fCpumExtrn;
    uint32_t    offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx)  { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28,     RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { HV_REG_FP,   CPUMCTX_EXTRN_FP,   RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR,   CPUMCTX_EXTRN_LR,   RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC,   CPUMCTX_EXTRN_PC,   RT_UOFFSETOF(CPUMCTX, Pc.u64) },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers. */
static const struct
{
    hv_simd_fp_reg_t    enmHvReg;
    uint32_t            offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** System registers. */
static const struct
{
    hv_sys_reg_t    enmHvReg;
    uint32_t        fCpumExtrn;
    uint32_t        offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0,    CPUMCTX_EXTRN_SP,             RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
    { HV_SYS_REG_SP_EL1,    CPUMCTX_EXTRN_SP,             RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
    { HV_SYS_REG_SPSR_EL1,  CPUMCTX_EXTRN_SPSR,           RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
    { HV_SYS_REG_ELR_EL1,   CPUMCTX_EXTRN_ELR,            RT_UOFFSETOF(CPUMCTX, Elr.u64) },
    { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
    { HV_SYS_REG_TCR_EL1,   CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
    { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
    { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * Converts a HV return code to a VBox status code.
 *
 * @returns VBox status code.
 * @param   hrc     The HV return code to convert.
 */
DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
{
    if (hrc == HV_SUCCESS)
        return VINF_SUCCESS;

    switch (hrc)
    {
        case HV_ERROR:        return VERR_INVALID_STATE;
        case HV_BUSY:         return VERR_RESOURCE_BUSY;
        case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
        case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
        case HV_NO_DEVICE:    return VERR_NOT_FOUND;
        case HV_UNSUPPORTED:  return VERR_NOT_SUPPORTED;
    }

    return VERR_IPE_UNEXPECTED_STATUS;
}


/**
 * Returns a human-readable string of the given exception class.
 *
 * @returns Pointer to the string matching the given EC.
 * @param   u32Ec   The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}


/**
 * Resolves a NEM page state from the given protection flags.
 *
 * @returns NEM page state.
 * @param   fPageProt   The page protection flags.
 */
DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
{
    switch (fPageProt)
    {
        case NEM_PAGE_PROT_NONE:
            return NEM_DARWIN_PAGE_STATE_UNMAPPED;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RX;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
            return NEM_DARWIN_PAGE_STATE_RW;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RWX;
        default:
            break;
    }

    AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
    return NEM_DARWIN_PAGE_STATE_UNMAPPED;
}


/**
 * Unmaps the given guest physical address range (page aligned).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start unmapping at.
 * @param   cb          The size of the range to unmap in bytes.
 * @param   pu2State    Where to store the new state of the unmapped page, optional.
 */
DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
{
    if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
        *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
    if (RT_LIKELY(hrc == HV_SUCCESS))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        if (pu2State)
            *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
        return VINF_SUCCESS;
    }

    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
            GCPhys, hrc));
    return VERR_NEM_IPE_6;
}


/**
 * Maps a given guest physical address range backed by the given memory with the given
 * protection flags.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   pvRam       The R3 pointer of the memory to back the range with.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX.
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));

    Assert(fPageProt != NEM_PAGE_PROT_NONE);
    RT_NOREF(pVM);

    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    return nemR3DarwinHvSts2Rc(hrc);
}

#if 0 /* unused */
DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
{
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc;
    if (pVM->nem.s.fCreatedAsid)
        hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
    else
        hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);

    return nemR3DarwinHvSts2Rc(hrc);
}
#endif

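/**
 * Converts a guest physical address to a ring-3 pointer for read-only access.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The guest physical address.
 * @param   ppv     Where to store the ring-3 pointer on success.
 */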
DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
{
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
    if (RT_SUCCESS(rc))
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    return rc;
}


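/**
 * Converts a guest physical address to a ring-3 pointer for read-write access.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The guest physical address.
 * @param   ppv     Where to store the ring-3 pointer on success.
 */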
DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
{
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
    if (RT_SUCCESS(rc))
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    return rc;
}


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        );
        char szInstr[256]; RT_ZERO(szInstr);
#if 0
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
#endif
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */


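/**
 * Imports the guest state from HV into the given CPUM context.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */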
static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;
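    /* Note: hv_return_t codes are accumulated by OR-ing below; HV_SUCCESS is 0,
       so any failing call leaves hrc != HV_SUCCESS and gets converted at the end. */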

    if (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    /* Almost done, just update extern flags. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Exports the guest state to HV for execution.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

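    /* A clear CPUMCTX_EXTRN_* bit means the CPUM context holds the current value,
       so only those registers need to be written back to HV here. */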
    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        &&    (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR))
           != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   fFallback   Whether we're in fallback mode or use-NEM mode. In
 *                      the latter we'll fail if we cannot initialize.
 * @param   fForced     Whether the HMForced flag is set and we should
 *                      fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);

    /*
     * Some state init.
     */
    PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
    RT_NOREF(pCfgNem);

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);

    int rc = VINF_SUCCESS;
    hv_return_t hrc = hv_vm_create(NULL);
    if (hrc == HV_SUCCESS)
    {
        pVM->nem.s.fCreatedVm = true;
        VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
        Log(("NEM: Marked active!\n"));
        PGMR3EnableNemMode(pVM);
    }
    else
        rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
                           "hv_vm_create() failed: %#x", hrc);

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}


/**
 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
 *
 * @returns VBox status code
 * @param   pVM     The VM handle.
 * @param   pVCpu   The vCPU handle.
 * @param   idCpu   ID of the CPU to create.
 */
static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
{
    hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, NULL);
    if (hrc != HV_SUCCESS)
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));

    if (idCpu == 0)
    {
        /** @todo */
    }

    return VINF_SUCCESS;
}


/**
 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
 *
 * @returns VBox status code
 * @param   pVCpu   The vCPU handle.
 */
static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
{
    hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
    Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
    return VINF_SUCCESS;
}


/**
 * This is called after CPUMR3Init is done.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
int nemR3NativeInitAfterCPUM(PVM pVM)
{
    /*
     * Validate sanity.
     */
    AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
    AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);

    /*
     * Setup the EMTs.
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];

        int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
        if (RT_FAILURE(rc))
        {
            /* Rollback. */
            while (idCpu--)
                VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVCpu);

            return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
        }
    }

    pVM->nem.s.fCreatedEmts = true;
    return VINF_SUCCESS;
}


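/**
 * Called when a VM initialization phase has completed; currently a no-op.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmWhat     The initialization phase that completed.
 */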
int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    RT_NOREF(pVM, enmWhat);
    return VINF_SUCCESS;
}


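/**
 * Terminates the NEM backend, destroying the vCPUs and finally the VM itself.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */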
int nemR3NativeTerm(PVM pVM)
{
    /*
     * Delete the VM.
     */

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];

        /*
         * Apple's documentation states that the vCPU should be destroyed
         * on the thread running the vCPU but as all the other EMTs are gone
         * at this point, destroying the VM would hang.
         *
         * We seem to be in luck here though, as destroying apparently works
         * from EMT(0) as well.
         */
        hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
        Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
    }

    pVM->nem.s.fCreatedEmts = false;
    if (pVM->nem.s.fCreatedVm)
    {
        hv_return_t hrc = hv_vm_destroy();
        if (hrc != HV_SUCCESS)
            LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));

        pVM->nem.s.fCreatedVm = false;
    }
    return VINF_SUCCESS;
}


/**
 * VM reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
void nemR3NativeReset(PVM pVM)
{
    RT_NOREF(pVM);
}


/**
 * Reset CPU due to INIT IPI or hot (un)plugging.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the CPU being
 *                      reset.
 * @param   fInitIpi    Whether this is the INIT IPI or hot (un)plugging case.
 */
void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
{
    RT_NOREF(pVCpu, fInitIpi);
}


/**
 * Returns the byte size from the given access SAS value.
 *
 * @returns Number of bytes to transfer.
 * @param   uSas    The SAS value to convert.
 */
DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
{
    switch (uSas)
    {
        case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE:     return sizeof(uint8_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD:     return sizeof(uint32_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD:    return sizeof(uint64_t);
        default:
            AssertReleaseFailed();
    }

    return 0;
}


/**
 * Sets the given general purpose register to the given value.
 *
 * @returns nothing.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 * @param   uReg        The register index.
 * @param   f64BitReg   Flag whether to operate on a 64-bit or 32-bit register.
 * @param   fSignExtend Flag whether to sign extend the value.
 * @param   u64Val      The value.
 */
DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
{
    AssertReturnVoid(uReg < 31);

    if (f64BitReg)
        pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
    else
        pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */

    /* Mark the register as not extern anymore. */
    switch (uReg)
    {
        case 0:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
            break;
        case 1:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
            break;
        case 2:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
            break;
        case 3:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
            break;
        default:
            AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
            /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
    }
}


/**
 * Gets the given general purpose register and returns the value.
 *
 * @returns Value from the given register.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   uReg    The register index.
 */
DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
{
    AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);

    if (uReg == ARMV8_AARCH64_REG_ZR)
        return 0;

    /** @todo Import the register if extern. */
    AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));

    return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
}


/**
 * Works on the data abort exception (which will be an MMIO access most of the time).
 *
 * @returns VBox strict status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   uIss            The instruction specific syndrome value.
 * @param   fInsn32Bit      Flag whether the exception was caused by a 32-bit or 16-bit instruction.
 * @param   GCPtrDataAbrt   The virtual GC address causing the data abort.
 * @param   GCPhysDataAbrt  The physical GC address which caused the data abort.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
                                                            RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
{
    bool fIsv        = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
    bool fL2Fault    = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
    bool fWrite      = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
    bool f64BitReg   = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
    bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
    uint8_t uReg     = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
    uint8_t uAcc     = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
    size_t cbAcc     = nemR3DarwinGetByteCountFromSas(uAcc);
    LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
                 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));

    AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */

    EMHistoryAddExit(pVCpu,
                     fWrite
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val = 0;
    if (fWrite)
    {
        u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
        rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
    {
        rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
    }

    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);

    return rcStrict;
}


/**
 * Works on the trapped MRS, MSR and system instruction exception.
 *
 * @returns VBox strict status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 * @param   uIss        The instruction specific syndrome value.
 * @param   fInsn32Bit  Flag whether the exception was caused by a 32-bit or 16-bit instruction.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
{
    bool fRead     = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
    uint8_t uCRm   = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
    uint8_t uReg   = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
    uint8_t uCRn   = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
    uint8_t uOp1   = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
    uint8_t uOp2   = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
    uint8_t uOp0   = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
    uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
    LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
                 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));

    /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
    EMHistoryAddExit(pVCpu,
                     fRead
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val = 0;
    if (fRead)
    {
        RT_NOREF(pVM);
        rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
        Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
              VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
    }
    else
    {
        u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
        rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
        Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
              VBOXSTRICTRC_VAL(rcStrict) ));
    }

    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);

    return rcStrict;
}


/**
 * Works on the trapped HVC instruction exception.
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   uIss    The instruction specific syndrome value.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss)
{
    uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
    LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));

#if 0 /** @todo For later */
    EMHistoryAddExit(pVCpu,
                     fRead
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
#endif

    RT_NOREF(pVM);
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    /** @todo Raise exception to EL1 if PSCI not configured. */
    /** @todo Need a generic mechanism here to pass this on to, GIM maybe? Always return -1 for now (PSCI). */
    nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)-1);

    return rcStrict;
}


/**
 * Handles an exception VM exit.
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   pExit   Pointer to the exit information.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
{
    uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
    uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
    bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);

    LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
                 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));

    switch (uEc)
    {
        case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
            return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
                                                           pExit->exception.physical_address);
        case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
            return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
        case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
            return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
        case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
            return VINF_EM_HALT;
        case ARMV8_ESR_EL2_EC_UNKNOWN:
        default:
            LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
                    uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
            AssertReleaseFailed();
            return VERR_NOT_IMPLEMENTED;
    }

    return VINF_SUCCESS;
}


/**
 * Handles an exit from hv_vcpu_run().
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
{
    int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
    if (RT_FAILURE(rc))
        return rc;

#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
    switch (pExit->reason)
    {
        case HV_EXIT_REASON_CANCELED:
            return VINF_EM_RAW_INTERRUPT;
        case HV_EXIT_REASON_EXCEPTION:
            return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
        case HV_EXIT_REASON_VTIMER_ACTIVATED:
            /** @todo Set interrupt. */
            return VINF_EM_RESCHEDULE;
        default:
            AssertReleaseFailed();
            break;
    }

    return VERR_INVALID_STATE;
}


/**
 * Runs the guest once until an exit occurs.
 *
 * @returns HV status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
{
    TMNotifyStartOfExecution(pVM, pVCpu);

    hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);

    TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());

    return hrc;
}


/**
 * Prepares the VM to run the guest.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fSingleStepping Flag whether we run in single stepping mode.
 */
static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
{
#ifdef LOG_ENABLED
    bool fIrq = false;
    bool fFiq = false;

    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    /** @todo */ RT_NOREF(fSingleStepping);
    int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
    AssertRCReturn(rc, rc);

    /* Set the pending interrupt state. */
    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
    {
        hv_return_t hrc = HV_SUCCESS;

        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
        {
            hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
            AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
#ifdef LOG_ENABLED
            fIrq = true;
#endif
        }

        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
        {
            hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
            AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
#ifdef LOG_ENABLED
            fFiq = true;
#endif
        }
    }
    else
    {
        hv_return_t hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);

        hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
    }

    LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
    pVCpu->nem.s.fEventPending = false;
    return VINF_SUCCESS;
}


/**
 * The normal runloop (no debugging features enabled).
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
{
    /*
     * The run loop.
     *
     * The current approach to state updating is to use the sledgehammer and
     * sync everything every time. This will be optimized later.
     */

    /*
     * Poll timers and run for a bit.
     */
    /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
     *        the whole polling job when timers have changed... */
    uint64_t offDeltaIgnored;
    uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (unsigned iLoop = 0;; iLoop++)
    {
        rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
        if (rcStrict != VINF_SUCCESS)
            break;

        hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
        if (hrc == HV_SUCCESS)
        {
            /*
             * Deal with the message.
             */
            rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
            if (rcStrict == VINF_SUCCESS)
            { /* hopefully likely */ }
            else
            {
                LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                break;
            }
        }
        else
        {
            AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
                                         pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
        }
    } /* the run loop */

    return rcStrict;
}


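/**
 * The run loop entry point for the NEM backend, called by EM.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */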
VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);

    /*
     * Try switch to NEM runloop state.
     */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
    { /* likely */ }
    else
    {
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
        LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }

    VBOXSTRICTRC rcStrict;
#if 0
    if (   !pVCpu->nem.s.fUseDebugLoop
        && !nemR3DarwinAnyExpensiveProbesEnabled()
        && !DBGFIsStepping(pVCpu)
        && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
#endif
        rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
#if 0
    else
        rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
#endif

    if (rcStrict == VINF_EM_RAW_TO_R3)
        rcStrict = VINF_SUCCESS;

    /*
     * Convert any pending HM events back to TRPM due to premature exits.
     *
     * This is because execution may continue from IEM and we would need to inject
     * the event from there (hence place it back in TRPM).
     */
    if (pVCpu->nem.s.fEventPending)
    {
        /** @todo */
    }


    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
    {
        /* Try anticipate what we might need. */
        uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
            || RT_FAILURE(rcStrict))
            fImport = CPUMCTX_EXTRN_ALL;
        else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
                                            | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;

        if (pVCpu->cpum.GstCtx.fExtrn & fImport)
        {
            /* Only import what is external currently. */
            int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
            if (RT_SUCCESS(rc2))
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
            else if (RT_SUCCESS(rcStrict))
                rcStrict = rc2;
            if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
                pVCpu->cpum.GstCtx.fExtrn = 0;
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
        }
        else
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
    }
    else
    {
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
        pVCpu->cpum.GstCtx.fExtrn = 0;
    }

    return rcStrict;
}


VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM, pVCpu);
    return true; /** @todo Are there any cases where we have to emulate? */
}


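/**
 * Enables or disables single instruction stepping for the given vCPU.
 *
 * @returns The previous setting.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fEnable Whether to enable single instruction stepping.
 */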
bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->nem.s.fSingleInstruction;
    pVCpu->nem.s.fSingleInstruction = fEnable;
    pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
    return fOld;
}


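/**
 * Forces the given vCPU out of guest execution so pending forced action flags
 * can be serviced, kicking it out of hv_vcpu_run() via hv_vcpus_exit().
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  Notification flags.
 */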
void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));

    RT_NOREF(pVM, fFlags);

    hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
    if (hrc != HV_SUCCESS)
        LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
}


DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
{
    RT_NOREF(pVM, fUseDebugLoop);
    AssertReleaseFailed();
    return false;
}


DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
{
    RT_NOREF(pVM, pVCpu, fUseDebugLoop);
    return fUseDebugLoop;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
                                               uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvR3)
    {
        int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvR3);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
{
    RT_NOREF(pVM);
    return false;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange, pvRam, fFlags);

    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else if (pvMmio2)
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc (ignored)\n",
                    GCPhys, cb, fFlags, rc));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                    GCPhys, cb, fFlags, rc));
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
                    GCPhys, cb, fFlags, pvMmio2, rc));
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));

#else
    RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
    *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
#endif
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    int rc = VINF_SUCCESS;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                     GCPhys, cb, fFlags, rc));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /* Ensure the page is marked as unmapped if relevant. */
    Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);

    /*
     * Restore the RAM we replaced.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        AssertPtr(pvRam);
        rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
            rc = VERR_NEM_MAP_PAGES_FAILED;
        }
    }

    RT_NOREF(pvMmio2);
#else
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
    if (pu2State)
        *pu2State = UINT8_MAX;
    rc = VERR_NEM_UNMAP_PAGES_FAILED;
#endif
    return rc;
}


VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
    AssertReleaseFailed();
    return VERR_NOT_IMPLEMENTED;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);

    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
                                                   uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
    *pu2State = UINT8_MAX;

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * (Re-)map readonly.
     */
    AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
    int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
    if (RT_FAILURE(rc))
    {
        LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
                GCPhys, cb, pvPages, fFlags, rc));
        return VERR_NEM_MAP_PAGES_FAILED;
    }
    RT_NOREF(fFlags, puNemRange);
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    RT_NOREF(pVM);

    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvMemR3)
    {
        int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
                               pvMemR3, GCPhys, cb, rc));
    }
    RT_NOREF(enmKind);
#else
    RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
    AssertFailed();
#endif
}


VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
    RT_NOREF(pVCpu, fEnabled);
}


void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
}


int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(HCPhys, fPageProt, enmType);

    return nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
                                                  PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(HCPhys, pvR3, fPageProt, enmType);

    nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                              RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);

    nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
}


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

    return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
}


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   pcTicks Where to return the CPU tick count.
 * @param   puAux   Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

    AssertReleaseFailed();
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    //AssertReleaseFailed();
    return VINF_SUCCESS;
}


/**
 * Returns features supported by the NEM backend.
 *
 * @returns Flags of features supported by the native NEM backend.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
{
    RT_NOREF(pVM);
    /*
     * Apple's Hypervisor.framework is not supported if the CPU doesn't support nested paging
     * and unrestricted guest execution, so we can safely return these flags here always.
     */
    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
}


/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
 *
 * @todo Add notes as the implementation progresses...
 */
