VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp

Last change on this file was 108843, checked in by vboxsync on 2025-04-04

VMM/PGM,NEM: Some early page table management infrastructure for ARMv8, bugref:10388

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 133.0 KB
/* $Id: NEMR3Native-darwin-armv8.cpp 108843 2025-04-04 08:36:32Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
 *
 * Log group 2: Exit logging.
 * Log group 3: Log context on exit.
 * Log group 5: Ring-3 memory management.
 */

/*
 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/pdmgic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/gcm.h>
#include "NEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include <VBox/dis.h>
#include <VBox/gic.h>
#include "dtrace/VBoxVMM.h"

#include <iprt/armv8.h>
#include <iprt/asm.h>
#include <iprt/asm-arm.h>
#include <iprt/asm-math.h>
#include <iprt/ldr.h>
#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/string.h>
#include <iprt/system.h>
#include <iprt/utf16.h>

#include <iprt/formats/arm-psci.h>

#include <mach/mach_time.h>
#include <mach/kern_return.h>

#include <Hypervisor/Hypervisor.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/

#if MAC_OS_X_VERSION_MIN_REQUIRED < 150000

/* The following GIC-related types were introduced with the macOS 15.0 SDK;
   declare them here so this code also builds against older SDKs. */
typedef enum hv_gic_distributor_reg_t : uint16_t
{
    HV_GIC_DISTRIBUTOR_REG_GICD_CTLR,
    HV_GIC_DISTRIBUTOR_REG_GICD_ICACTIVER0
    /** @todo */
} hv_gic_distributor_reg_t;


typedef enum hv_gic_icc_reg_t : uint16_t
{
    HV_GIC_ICC_REG_PMR_EL1,
    HV_GIC_ICC_REG_BPR0_EL1,
    HV_GIC_ICC_REG_AP0R0_EL1,
    HV_GIC_ICC_REG_AP1R0_EL1,
    HV_GIC_ICC_REG_RPR_EL1,
    HV_GIC_ICC_REG_BPR1_EL1,
    HV_GIC_ICC_REG_CTLR_EL1,
    HV_GIC_ICC_REG_SRE_EL1,
    HV_GIC_ICC_REG_IGRPEN0_EL1,
    HV_GIC_ICC_REG_IGRPEN1_EL1,
    HV_GIC_ICC_REG_INVALID,
    /** @todo */
} hv_gic_icc_reg_t;


typedef enum hv_gic_ich_reg_t : uint16_t
{
    HV_GIC_ICH_REG_AP0R0_EL2
    /** @todo */
} hv_gic_ich_reg_t;


typedef enum hv_gic_icv_reg_t : uint16_t
{
    HV_GIC_ICV_REG_AP0R0_EL1
    /** @todo */
} hv_gic_icv_reg_t;


typedef enum hv_gic_msi_reg_t : uint16_t
{
    HV_GIC_REG_GICM_SET_SPI_NSR
    /** @todo */
} hv_gic_msi_reg_t;


typedef enum hv_gic_redistributor_reg_t : uint16_t
{
    HV_GIC_REDISTRIBUTOR_REG_GICR_ICACTIVER0
    /** @todo */
} hv_gic_redistributor_reg_t;


typedef enum hv_gic_intid_t : uint16_t
{
    HV_GIC_INT_EL1_PHYSICAL_TIMER = 23,
    HV_GIC_INT_EL1_VIRTUAL_TIMER = 25,
    HV_GIC_INT_EL2_PHYSICAL_TIMER = 26,
    HV_GIC_INT_MAINTENANCE = 27,
    HV_GIC_INT_PERFORMANCE_MONITOR = 30
} hv_gic_intid_t;

# define HV_SYS_REG_ACTLR_EL1 (hv_sys_reg_t)0xc081

#else
# define HV_GIC_ICC_REG_INVALID (hv_gic_icc_reg_t)UINT16_MAX
#endif

typedef hv_vm_config_t FN_HV_VM_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_SUPPORTED(bool *el2_supported);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_ENABLED(hv_vm_config_t config, bool *el2_enabled);
typedef hv_return_t FN_HV_VM_CONFIG_SET_EL2_ENABLED(hv_vm_config_t config, bool el2_enabled);

typedef struct hv_gic_config_s *hv_gic_config_t;
typedef hv_return_t FN_HV_GIC_CREATE(hv_gic_config_t gic_config);
typedef hv_return_t FN_HV_GIC_RESET(void);
typedef hv_gic_config_t FN_HV_GIC_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t distributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t redistributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE(hv_gic_config_t config, hv_ipa_t msi_region_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE(hv_gic_config_t config, uint32_t msi_intid_base, uint32_t msi_intid_count);

typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE(hv_vcpu_t vcpu, hv_ipa_t *redistributor_base_address);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE(size_t *redistributor_region_size);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_SIZE(size_t *redistributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_SIZE(size_t *distributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT(size_t *distributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT(size_t *redistributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT(size_t *msi_region_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_SIZE(size_t *msi_region_size);
typedef hv_return_t FN_HV_GIC_GET_SPI_INTERRUPT_RANGE(uint32_t *spi_intid_base, uint32_t *spi_intid_count);

typedef struct hv_gic_state_s *hv_gic_state_t;
typedef hv_gic_state_t FN_HV_GIC_STATE_CREATE(void);
typedef hv_return_t FN_HV_GIC_SET_STATE(const void *gic_state_data, size_t gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_SIZE(hv_gic_state_t state, size_t *gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_DATA(hv_gic_state_t state, void *gic_state_data);

typedef hv_return_t FN_HV_GIC_SEND_MSI(hv_ipa_t address, uint32_t intid);
typedef hv_return_t FN_HV_GIC_SET_SPI(uint32_t intid, bool level);

typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t *value);

typedef hv_return_t FN_HV_GIC_SET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t value);

typedef hv_return_t FN_HV_GIC_GET_INTID(hv_gic_intid_t interrupt, uint32_t *intid);


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** @name Optional APIs imported from Hypervisor.framework.
 * @{ */
static FN_HV_VM_CONFIG_CREATE *g_pfnHvVmConfigCreate = NULL; /* Since 13.0 */
static FN_HV_VM_CONFIG_GET_EL2_SUPPORTED *g_pfnHvVmConfigGetEl2Supported = NULL; /* Since 15.0 */
static FN_HV_VM_CONFIG_GET_EL2_ENABLED *g_pfnHvVmConfigGetEl2Enabled = NULL; /* Since 15.0 */
static FN_HV_VM_CONFIG_SET_EL2_ENABLED *g_pfnHvVmConfigSetEl2Enabled = NULL; /* Since 15.0 */

static FN_HV_GIC_CREATE *g_pfnHvGicCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_RESET *g_pfnHvGicReset = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_CREATE *g_pfnHvGicConfigCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE *g_pfnHvGicConfigSetDistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE *g_pfnHvGicConfigSetRedistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE *g_pfnHvGicConfigSetMsiRegionBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE *g_pfnHvGicConfigSetMsiInterruptRange = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE *g_pfnHvGicGetRedistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE *g_pfnHvGicGetRedistributorRegionSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_SIZE *g_pfnHvGicGetRedistributorSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_SIZE *g_pfnHvGicGetDistributorSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetDistributorBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetRedistributorBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT *g_pfnHvGicGetMsiRegionBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_SIZE *g_pfnHvGicGetMsiRegionSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_SPI_INTERRUPT_RANGE *g_pfnHvGicGetSpiInterruptRange = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_CREATE *g_pfnHvGicStateCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_STATE *g_pfnHvGicSetState = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_GET_SIZE *g_pfnHvGicStateGetSize = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_GET_DATA *g_pfnHvGicStateGetData = NULL; /* Since 15.0 */
static FN_HV_GIC_SEND_MSI *g_pfnHvGicSendMsi = NULL; /* Since 15.0 */
FN_HV_GIC_SET_SPI *g_pfnHvGicSetSpi = NULL; /* Since 15.0, exported for GICR3Nem-darwin.cpp */
static FN_HV_GIC_GET_DISTRIBUTOR_REG *g_pfnHvGicGetDistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REG *g_pfnHvGicGetMsiReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICC_REG *g_pfnHvGicGetIccReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICH_REG *g_pfnHvGicGetIchReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICV_REG *g_pfnHvGicGetIcvReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REG *g_pfnHvGicGetRedistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_DISTRIBUTOR_REG *g_pfnHvGicSetDistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_MSI_REG *g_pfnHvGicSetMsiReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICC_REG *g_pfnHvGicSetIccReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICH_REG *g_pfnHvGicSetIchReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICV_REG *g_pfnHvGicSetIcvReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_REDISTRIBUTOR_REG *g_pfnHvGicSetRedistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_INTID *g_pfnHvGicGetIntid = NULL; /* Since 15.0 */
/** @} */


/**
 * Import instructions.
 */
static const struct
{
    void **ppfn; /**< The function pointer variable. */
    const char *pszName; /**< The function name. */
} g_aImports[] =
{
#define NEM_DARWIN_IMPORT(a_Pfn, a_Name) { (void **)&(a_Pfn), #a_Name }
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigCreate, hv_vm_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Supported, hv_vm_config_get_el2_supported),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Enabled, hv_vm_config_get_el2_enabled),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigSetEl2Enabled, hv_vm_config_set_el2_enabled),

    NEM_DARWIN_IMPORT(g_pfnHvGicCreate, hv_gic_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicReset, hv_gic_reset),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigCreate, hv_gic_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetDistributorBase, hv_gic_config_set_distributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetRedistributorBase, hv_gic_config_set_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiRegionBase, hv_gic_config_set_msi_region_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiInterruptRange, hv_gic_config_set_msi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBase, hv_gic_get_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorRegionSize, hv_gic_get_redistributor_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorSize, hv_gic_get_redistributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorSize, hv_gic_get_distributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorBaseAlignment, hv_gic_get_distributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBaseAlignment, hv_gic_get_redistributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionBaseAlignment, hv_gic_get_msi_region_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionSize, hv_gic_get_msi_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetSpiInterruptRange, hv_gic_get_spi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateCreate, hv_gic_state_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetState, hv_gic_set_state),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetSize, hv_gic_state_get_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetData, hv_gic_state_get_data),
    NEM_DARWIN_IMPORT(g_pfnHvGicSendMsi, hv_gic_send_msi),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetSpi, hv_gic_set_spi),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorReg, hv_gic_get_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiReg, hv_gic_get_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIccReg, hv_gic_get_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIchReg, hv_gic_get_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIcvReg, hv_gic_get_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorReg, hv_gic_get_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetDistributorReg, hv_gic_set_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetMsiReg, hv_gic_set_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIccReg, hv_gic_set_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIchReg, hv_gic_set_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIcvReg, hv_gic_set_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetRedistributorReg, hv_gic_set_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIntid, hv_gic_get_intid)
#undef NEM_DARWIN_IMPORT
};

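/* Note: the table above is walked by nemR3DarwinLoadHv() below; any entry the installed
   Hypervisor.framework does not export is left NULL, so each pointer must be checked
   before use. */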

/*
 * Let the preprocessor alias the APIs to the import variables for better autocompletion.
 */
#ifndef IN_SLICKEDIT
# define hv_vm_config_create g_pfnHvVmConfigCreate
# define hv_vm_config_get_el2_supported g_pfnHvVmConfigGetEl2Supported
# define hv_vm_config_get_el2_enabled g_pfnHvVmConfigGetEl2Enabled
# define hv_vm_config_set_el2_enabled g_pfnHvVmConfigSetEl2Enabled

# define hv_gic_create g_pfnHvGicCreate
# define hv_gic_reset g_pfnHvGicReset
# define hv_gic_config_create g_pfnHvGicConfigCreate
# define hv_gic_config_set_distributor_base g_pfnHvGicConfigSetDistributorBase
# define hv_gic_config_set_redistributor_base g_pfnHvGicConfigSetRedistributorBase
# define hv_gic_config_set_msi_region_base g_pfnHvGicConfigSetMsiRegionBase
# define hv_gic_config_set_msi_interrupt_range g_pfnHvGicConfigSetMsiInterruptRange
# define hv_gic_get_redistributor_base g_pfnHvGicGetRedistributorBase
# define hv_gic_get_redistributor_region_size g_pfnHvGicGetRedistributorRegionSize
# define hv_gic_get_redistributor_size g_pfnHvGicGetRedistributorSize
# define hv_gic_get_distributor_size g_pfnHvGicGetDistributorSize
# define hv_gic_get_distributor_base_alignment g_pfnHvGicGetDistributorBaseAlignment
# define hv_gic_get_redistributor_base_alignment g_pfnHvGicGetRedistributorBaseAlignment
# define hv_gic_get_msi_region_base_alignment g_pfnHvGicGetMsiRegionBaseAlignment
# define hv_gic_get_msi_region_size g_pfnHvGicGetMsiRegionSize
# define hv_gic_get_spi_interrupt_range g_pfnHvGicGetSpiInterruptRange
# define hv_gic_state_create g_pfnHvGicStateCreate
# define hv_gic_set_state g_pfnHvGicSetState
# define hv_gic_state_get_size g_pfnHvGicStateGetSize
# define hv_gic_state_get_data g_pfnHvGicStateGetData
# define hv_gic_send_msi g_pfnHvGicSendMsi
# define hv_gic_set_spi g_pfnHvGicSetSpi
# define hv_gic_get_distributor_reg g_pfnHvGicGetDistributorReg
# define hv_gic_get_msi_reg g_pfnHvGicGetMsiReg
# define hv_gic_get_icc_reg g_pfnHvGicGetIccReg
# define hv_gic_get_ich_reg g_pfnHvGicGetIchReg
# define hv_gic_get_icv_reg g_pfnHvGicGetIcvReg
# define hv_gic_get_redistributor_reg g_pfnHvGicGetRedistributorReg
# define hv_gic_set_distributor_reg g_pfnHvGicSetDistributorReg
# define hv_gic_set_msi_reg g_pfnHvGicSetMsiReg
# define hv_gic_set_icc_reg g_pfnHvGicSetIccReg
# define hv_gic_set_ich_reg g_pfnHvGicSetIchReg
# define hv_gic_set_icv_reg g_pfnHvGicSetIcvReg
# define hv_gic_set_redistributor_reg g_pfnHvGicSetRedistributorReg
# define hv_gic_get_intid g_pfnHvGicGetIntid
#endif


/** The general registers. */
static const struct
{
    hv_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx)  { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { HV_REG_FP, CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR, CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC, CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers. */
static const struct
{
    hv_simd_fp_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** Debug system registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumDbgRegs[] =
{
#define CPUM_DBGREG_EMIT(a_BorW, a_Idx) \
    { HV_SYS_REG_DBG ## a_BorW ## CR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Ctrl.u64) }, \
    { HV_SYS_REG_DBG ## a_BorW ## VR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Value.u64) }
    /* Breakpoint registers. */
    CPUM_DBGREG_EMIT(B, 0),
    CPUM_DBGREG_EMIT(B, 1),
    CPUM_DBGREG_EMIT(B, 2),
    CPUM_DBGREG_EMIT(B, 3),
    CPUM_DBGREG_EMIT(B, 4),
    CPUM_DBGREG_EMIT(B, 5),
    CPUM_DBGREG_EMIT(B, 6),
    CPUM_DBGREG_EMIT(B, 7),
    CPUM_DBGREG_EMIT(B, 8),
    CPUM_DBGREG_EMIT(B, 9),
    CPUM_DBGREG_EMIT(B, 10),
    CPUM_DBGREG_EMIT(B, 11),
    CPUM_DBGREG_EMIT(B, 12),
    CPUM_DBGREG_EMIT(B, 13),
    CPUM_DBGREG_EMIT(B, 14),
    CPUM_DBGREG_EMIT(B, 15),
    /* Watchpoint registers. */
    CPUM_DBGREG_EMIT(W, 0),
    CPUM_DBGREG_EMIT(W, 1),
    CPUM_DBGREG_EMIT(W, 2),
    CPUM_DBGREG_EMIT(W, 3),
    CPUM_DBGREG_EMIT(W, 4),
    CPUM_DBGREG_EMIT(W, 5),
    CPUM_DBGREG_EMIT(W, 6),
    CPUM_DBGREG_EMIT(W, 7),
    CPUM_DBGREG_EMIT(W, 8),
    CPUM_DBGREG_EMIT(W, 9),
    CPUM_DBGREG_EMIT(W, 10),
    CPUM_DBGREG_EMIT(W, 11),
    CPUM_DBGREG_EMIT(W, 12),
    CPUM_DBGREG_EMIT(W, 13),
    CPUM_DBGREG_EMIT(W, 14),
    CPUM_DBGREG_EMIT(W, 15),
    { HV_SYS_REG_MDSCR_EL1, RT_UOFFSETOF(CPUMCTX, Mdscr.u64) }
#undef CPUM_DBGREG_EMIT
};
/** PAuth key system registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumPAuthKeyRegs[] =
{
    { HV_SYS_REG_APDAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apda.Low.u64) },
    { HV_SYS_REG_APDAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apda.High.u64) },
    { HV_SYS_REG_APDBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.Low.u64) },
    { HV_SYS_REG_APDBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.High.u64) },
    { HV_SYS_REG_APGAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apga.Low.u64) },
    { HV_SYS_REG_APGAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apga.High.u64) },
    { HV_SYS_REG_APIAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apia.Low.u64) },
    { HV_SYS_REG_APIAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apia.High.u64) },
    { HV_SYS_REG_APIBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apib.Low.u64) },
    { HV_SYS_REG_APIBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apib.High.u64) }
};
/** System registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
    { HV_SYS_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
    { HV_SYS_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
    { HV_SYS_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
    { HV_SYS_REG_VBAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, VBar.u64) },
    { HV_SYS_REG_AFSR0_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr0.u64) },
    { HV_SYS_REG_AFSR1_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr1.u64) },
    { HV_SYS_REG_AMAIR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Amair.u64) },
    { HV_SYS_REG_CNTKCTL_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, CntKCtl.u64) },
    { HV_SYS_REG_CONTEXTIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, ContextIdr.u64) },
    { HV_SYS_REG_CPACR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Cpacr.u64) },
    { HV_SYS_REG_CSSELR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Csselr.u64) },
    { HV_SYS_REG_ESR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Esr.u64) },
    { HV_SYS_REG_FAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Far.u64) },
    { HV_SYS_REG_MAIR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Mair.u64) },
    { HV_SYS_REG_PAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Par.u64) },
    { HV_SYS_REG_TPIDRRO_EL0, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64) },
    { HV_SYS_REG_TPIDR_EL0, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64) },
    { HV_SYS_REG_TPIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64) },
    { HV_SYS_REG_MDCCINT_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, MDccInt.u64) }
};
/** Paging registers (CPUMCTX_EXTRN_SCTLR_TCR_TTBR). */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumSysRegsPg[] =
{
    { HV_SYS_REG_SCTLR_EL1, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
    { HV_SYS_REG_TCR_EL1, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
    { HV_SYS_REG_TTBR0_EL1, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
    { HV_SYS_REG_TTBR1_EL1, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) }
};

/** Additional System registers to sync when running on at least macOS Sequoia 15.0. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumSysRegsSequioa[] =
{
    { HV_SYS_REG_ACTLR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Actlr.u64) }
};
/** EL2 support system registers. */
static const struct
{
    uint16_t idSysReg;
    uint32_t offCpumCtx;
} s_aCpumEl2SysRegs[] =
{
    { ARMV8_AARCH64_SYSREG_CNTHCTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_TVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpTValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTVOFF_EL2, RT_UOFFSETOF(CPUMCTX, CntVOffEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CPTR_EL2, RT_UOFFSETOF(CPUMCTX, CptrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ELR_EL2, RT_UOFFSETOF(CPUMCTX, ElrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ESR_EL2, RT_UOFFSETOF(CPUMCTX, EsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_FAR_EL2, RT_UOFFSETOF(CPUMCTX, FarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HCR_EL2, RT_UOFFSETOF(CPUMCTX, HcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HPFAR_EL2, RT_UOFFSETOF(CPUMCTX, HpFarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_MAIR_EL2, RT_UOFFSETOF(CPUMCTX, MairEl2.u64) },
    //{ ARMV8_AARCH64_SYSREG_MDCR_EL2, RT_UOFFSETOF(CPUMCTX, MdcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SCTLR_EL2, RT_UOFFSETOF(CPUMCTX, SctlrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SPSR_EL2, RT_UOFFSETOF(CPUMCTX, SpsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SP_EL2, RT_UOFFSETOF(CPUMCTX, SpEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TCR_EL2, RT_UOFFSETOF(CPUMCTX, TcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TPIDR_EL2, RT_UOFFSETOF(CPUMCTX, TpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR0_EL2, RT_UOFFSETOF(CPUMCTX, Ttbr0El2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR1_EL2, RT_UOFFSETOF(CPUMCTX, Ttbr1El2.u64) },
    { ARMV8_AARCH64_SYSREG_VBAR_EL2, RT_UOFFSETOF(CPUMCTX, VBarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VMPIDR_EL2, RT_UOFFSETOF(CPUMCTX, VMpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VPIDR_EL2, RT_UOFFSETOF(CPUMCTX, VPidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTCR_EL2, RT_UOFFSETOF(CPUMCTX, VTcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTTBR_EL2, RT_UOFFSETOF(CPUMCTX, VTtbrEl2.u64) }
};
/** ID registers. */
static const struct
{
    hv_feature_reg_t enmHvReg;
    uint32_t offIdStruct;
} s_aIdRegs[] =
{
    { HV_FEATURE_REG_ID_AA64DFR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1) },
    { HV_FEATURE_REG_ID_AA64DFR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1) },
    { HV_FEATURE_REG_ID_AA64ISAR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1) },
    { HV_FEATURE_REG_ID_AA64ISAR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1) },
    { HV_FEATURE_REG_ID_AA64MMFR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR2_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1) },
    { HV_FEATURE_REG_ID_AA64PFR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1) },
    { HV_FEATURE_REG_ID_AA64PFR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1) },
    { HV_FEATURE_REG_CLIDR_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegClidrEl1) },
    { HV_FEATURE_REG_CTR_EL0, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegCtrEl0) },
    { HV_FEATURE_REG_DCZID_EL0, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegDczidEl0) }
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * Converts a HV return code to a VBox status code.
 *
 * @returns VBox status code.
 * @param   hrc     The HV return code to convert.
 */
DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
{
    if (hrc == HV_SUCCESS)
        return VINF_SUCCESS;

    switch (hrc)
    {
        case HV_ERROR:        return VERR_INVALID_STATE;
        case HV_BUSY:         return VERR_RESOURCE_BUSY;
        case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
        case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
        case HV_NO_DEVICE:    return VERR_NOT_FOUND;
        case HV_UNSUPPORTED:  return VERR_NOT_SUPPORTED;
    }

    return VERR_IPE_UNEXPECTED_STATUS;
}


/** Puts a name to a Hypervisor.framework status code. */
static const char *nemR3DarwinHvStatusName(hv_return_t hrc)
{
    switch (hrc)
    {
        RT_CASE_RET_STR(HV_SUCCESS);
        RT_CASE_RET_STR(HV_ERROR);
        RT_CASE_RET_STR(HV_BUSY);
        RT_CASE_RET_STR(HV_BAD_ARGUMENT);
        RT_CASE_RET_STR(HV_ILLEGAL_GUEST_STATE);
        RT_CASE_RET_STR(HV_NO_RESOURCES);
        RT_CASE_RET_STR(HV_NO_DEVICE);
        RT_CASE_RET_STR(HV_DENIED);
        RT_CASE_RET_STR(HV_UNSUPPORTED);
    }
    return "";
}


#if 0 /* unused right now */
/**
 * Converts an ICC system register into Darwin's Hypervisor.Framework equivalent.
 *
 * @returns HvF's ICC system register.
 * @param   u32Reg      The ARMv8 ICC system register.
 */
static hv_gic_icc_reg_t nemR3DarwinIccRegFromSysReg(uint32_t u32Reg)
{
    switch (u32Reg)
    {
        case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1: return HV_GIC_ICC_REG_PMR_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1: return HV_GIC_ICC_REG_BPR0_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1: return HV_GIC_ICC_REG_AP0R0_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1: return HV_GIC_ICC_REG_AP1R0_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1: return HV_GIC_ICC_REG_RPR_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1: return HV_GIC_ICC_REG_BPR1_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1: return HV_GIC_ICC_REG_CTLR_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1: return HV_GIC_ICC_REG_SRE_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1: return HV_GIC_ICC_REG_IGRPEN0_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1: return HV_GIC_ICC_REG_IGRPEN1_EL1;
    }
    AssertReleaseFailed();
    return HV_GIC_ICC_REG_INVALID;
}
#endif


/**
 * Returns a human-readable string for the given exception class.
 *
 * @returns Pointer to the string matching the given EC.
 * @param   u32Ec   The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}


/**
 * Resolves a NEM page state from the given protection flags.
 *
 * @returns NEM page state.
 * @param   fPageProt   The page protection flags.
 */
DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
{
    switch (fPageProt)
    {
        case NEM_PAGE_PROT_NONE:
            return NEM_DARWIN_PAGE_STATE_UNMAPPED;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RX;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
            return NEM_DARWIN_PAGE_STATE_RW;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RWX;
        default:
            break;
    }

    AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
    return NEM_DARWIN_PAGE_STATE_UNMAPPED;
}


/**
 * Unmaps the given guest physical address range (page aligned).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start unmapping at.
 * @param   cb          The size of the range to unmap in bytes.
 * @param   pu2State    Where to store the new state of the unmapped page, optional.
 */
DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
{
    if (pu2State && *pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
        *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
    if (RT_LIKELY(hrc == HV_SUCCESS))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        if (pu2State)
            *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
        return VINF_SUCCESS;
    }

    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
            GCPhys, hrc));
    return VERR_NEM_IPE_6;
}


/**
 * Maps a given guest physical address range backed by the given memory with the given
 * protection flags.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   pvRam       The R3 pointer of the memory to back the range with.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));

    Assert(fPageProt != NEM_PAGE_PROT_NONE);
    RT_NOREF(pVM);

    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Changes the protection flags for the given guest physical address range.
 *
 * @returns VBox status code.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
            GCPhys, cb, fPageProt, hrc));
    return nemR3DarwinHvSts2Rc(hrc);
}


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1} actlr_el1=%016VR{actlr_el1}\n"
                        );
        if (pVM->nem.s.fEl2Enabled)
        {
            Log3(("%s\n", szRegs));
            DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                            "sp_el2=%016VR{sp_el2} elr_el2=%016VR{elr_el2}\n"
                            "spsr_el2=%016VR{spsr_el2} tpidr_el2=%016VR{tpidr_el2}\n"
                            "sctlr_el2=%016VR{sctlr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "ttbr0_el2=%016VR{ttbr0_el2} ttbr1_el2=%016VR{ttbr1_el2}\n"
                            "esr_el2=%016VR{esr_el2} far_el2=%016VR{far_el2}\n"
                            "hcr_el2=%016VR{hcr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "vbar_el2=%016VR{vbar_el2} cptr_el2=%016VR{cptr_el2}\n"
                            );
        }
        char szInstr[256]; RT_ZERO(szInstr);
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */


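/**
 * Imports the requested guest state from Hypervisor.framework into CPUMCTX.
 *
 * Note that hrc is accumulated with |= below; this works because HV_SUCCESS is 0,
 * so any failing call leaves a non-zero status behind.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   fWhat   The state to import, combination of CPUMCTX_EXTRN_XXX flags.
 */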
static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);

    hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
    if (hrc == HV_SUCCESS)
        hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    /* The paging related system registers need to be treated differently as they might invoke a PGM mode change. */
    bool fPgModeChange = false;
    uint64_t u64RegSctlrEl1;
    uint64_t u64RegTcrEl1;
    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR))
    {
        hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_SCTLR_EL1, &u64RegSctlrEl1);
        hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_TCR_EL1, &u64RegTcrEl1);
        hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_TTBR0_EL1, &pVCpu->cpum.GstCtx.Ttbr0.u64);
        hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_TTBR1_EL1, &pVCpu->cpum.GstCtx.Ttbr1.u64);
        if (   hrc == HV_SUCCESS
            && (   u64RegSctlrEl1 != pVCpu->cpum.GstCtx.Sctlr.u64
                || u64RegTcrEl1 != pVCpu->cpum.GstCtx.Tcr.u64))
        {
            pVCpu->cpum.GstCtx.Sctlr.u64 = u64RegSctlrEl1;
            pVCpu->cpum.GstCtx.Tcr.u64 = u64RegTcrEl1;
            fPgModeChange = true;
        }
    }

    if (   hrc == HV_SUCCESS
        && pVM->nem.s.fMacOsSequia
        && (fWhat & CPUMCTX_EXTRN_SYSREG_MISC))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegsSequioa); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegsSequioa[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegsSequioa[i].enmHvReg, pu64);

            /* Make sure only the TOS bit is kept as this seems to return 0x0000000000000c00 which fails during writes. */
            /** @todo r=aeichner Need to find out where the value comes from, some bits were reverse engineered here
             *        https://github.com/AsahiLinux/docs/blob/main/docs/hw/cpu/system-registers.md#actlr_el1-arm-standard-not-standard
             *        But the ones being set are not documented. Maybe they are always set by the Hypervisor...
             */
            if (s_aCpumSysRegsSequioa[i].enmHvReg == HV_SYS_REG_ACTLR_EL1)
                *pu64 &= RT_BIT_64(1);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    if (fPgModeChange)
    {
        int rc = PGMChangeMode(pVCpu, 1 /*bEl*/, u64RegSctlrEl1, u64RegTcrEl1);
        AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
    }

    /* Almost done, just update extern flags. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Exports the guest state to HV for execution.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

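    /* A set CPUMCTX_EXTRN_XXX bit means the register state still lives in Hypervisor.framework,
       so only registers with a clear bit hold fresh values in CPUMCTX and need to be written back. */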
    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        &&    (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
           != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SYSREG_MISC))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SCTLR_TCR_TTBR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegsPg); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegsPg[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegsPg[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && pVM->nem.s.fMacOsSequia
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_MISC))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegsSequioa); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegsSequioa[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegsSequioa[i].enmHvReg, *pu64);
            Assert(hrc == HV_SUCCESS);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, *pu64);
            Assert(hrc == HV_SUCCESS);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

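    /* Everything was pushed to Hypervisor.framework above, so mark the whole context as
       externally kept again. */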
    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
 *
 * @returns VBox status code.
 * @param   pErrInfo    Where to always return error info.
 */
static int nemR3DarwinLoadHv(PRTERRINFO pErrInfo)
{
    RTLDRMOD hMod = NIL_RTLDRMOD;
    static const char *s_pszHvPath = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";

    int rc = RTLdrLoadEx(s_pszHvPath, &hMod, RTLDRLOAD_FLAGS_NO_UNLOAD | RTLDRLOAD_FLAGS_NO_SUFFIX, pErrInfo);
    if (RT_SUCCESS(rc))
    {
        for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
        {
            int rc2 = RTLdrGetSymbol(hMod, g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
            if (RT_SUCCESS(rc2))
                LogRel(("NEM: info: Found optional import Hypervisor!%s.\n", g_aImports[i].pszName));
            else
            {
                *g_aImports[i].ppfn = NULL;
                LogRel(("NEM: info: Optional import Hypervisor!%s not found: %Rrc\n", g_aImports[i].pszName, rc2));
            }
        }
        Assert(RT_SUCCESS(rc) && !RTErrInfoIsSet(pErrInfo));
        RTLdrClose(hMod);
    }
    else
    {
        RTErrInfoAddF(pErrInfo, rc, "Failed to load Hypervisor.framework: %s: %Rrc", s_pszHvPath, rc);
        rc = VERR_NEM_INIT_FAILED;
    }

    return rc;
}


/**
 * Dumps some GIC information to the release log.
 */
static void nemR3DarwinDumpGicInfo(void)
{
    size_t val = 0;
    hv_return_t hrc = hv_gic_get_redistributor_size(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_size(&val);
    LogRel(("GICNem: hv_gic_get_distributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_distributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_redistributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_size(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_size() -> hrc=%#x / size=%zu\n", hrc, val));
    uint32_t u32SpiIntIdBase = 0;
    uint32_t cSpiIntIds = 0;
    hrc = hv_gic_get_spi_interrupt_range(&u32SpiIntIdBase, &cSpiIntIds);
    LogRel(("GICNem: hv_gic_get_spi_interrupt_range() -> hrc=%#x / SpiIntIdBase=%u, cSpiIntIds=%u\n", hrc, u32SpiIntIdBase, cSpiIntIds));

    uint32_t u32IntId = 0;
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_MAINTENANCE, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_MAINTENANCE) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
}


1288static int nemR3DarwinGicCreate(PVM pVM)
1289{
1290 nemR3DarwinDumpGicInfo();
1291
1292 //PCFGMNODE pGicDev = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic/0");
1293 PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
1294 AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);
1295
1296 hv_gic_config_t hGicCfg = hv_gic_config_create();
1297
1298 /*
1299 * Query the MMIO ranges.
1300 */
1301 RTGCPHYS GCPhysMmioBaseDist = 0;
1302 int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
1303 if (RT_FAILURE(rc))
1304 return VMSetError(pVM, rc, RT_SRC_POS,
1305 "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");
1306
1307 RTGCPHYS GCPhysMmioBaseReDist = 0;
1308 rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
1309 if (RT_FAILURE(rc))
1310 return VMSetError(pVM, rc, RT_SRC_POS,
1311 "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");
1312
1313 hv_return_t hrc = hv_gic_config_set_distributor_base(hGicCfg, GCPhysMmioBaseDist);
1314 if (hrc != HV_SUCCESS)
1315 return nemR3DarwinHvSts2Rc(hrc);
1316
1317 hrc = hv_gic_config_set_redistributor_base(hGicCfg, GCPhysMmioBaseReDist);
1318 if (hrc != HV_SUCCESS)
1319 return nemR3DarwinHvSts2Rc(hrc);
1320
1321 hrc = hv_gic_create(hGicCfg);
1322 os_release(hGicCfg);
1323 if (hrc != HV_SUCCESS)
1324 return nemR3DarwinHvSts2Rc(hrc);
1325
1326 return rc;
1327}
1328
1329
1330/**
1331 * Try initialize the native API.
1332 *
1333 * This may only do part of the job, more can be done in
1334 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
1335 *
1336 * @returns VBox status code.
1337 * @param pVM The cross context VM structure.
1338 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
1339 * the latter we'll fail if we cannot initialize.
1340 * @param fForced Whether the HMForced flag is set and we should
1341 * fail if we cannot initialize.
1342 */
1343int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
1344{
1345 AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
1346
1347 /*
1348 * Some state init.
1349 */
1350 PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
1351 RT_NOREF(pCfgNem);
1352
1353 /*
1354 * Error state.
1355 * The error message will be non-empty on failure and 'rc' will be set too.
1356 */
1357 RTERRINFOSTATIC ErrInfo;
1358 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
1359
1360 /* Resolve optional imports */
1361 int rc = nemR3DarwinLoadHv(pErrInfo);
1362 if (RT_FAILURE(rc))
1363 {
1364 if ((fForced || !fFallback) && RTErrInfoIsSet(pErrInfo))
1365 return VMSetError(pVM, rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1366 return rc;
1367 }
1368
1369 /*
1370 * Need to enable nested virt here if supported and reset the CFGM value to false
1371 * if not supported. This ASSUMES that NEM is initialized before CPUM.
1372 */
1373 PCFGMNODE pCfgCpum = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/");
1374
1375 hv_vm_config_t hVmCfg = NULL;
1376 if ( hv_vm_config_create
1377 && hv_vm_config_get_el2_supported)
1378 {
1379 pVM->nem.s.fMacOsSequia = true; /* hv_vm_config_get_el2_supported is only available on macOS Sequoia 15.0+. */
1380
1381 hVmCfg = hv_vm_config_create();
1382
1383 bool fHvEl2Supported = false;
1384 hv_return_t hrc = hv_vm_config_get_el2_supported(&fHvEl2Supported);
1385 if ( hrc == HV_SUCCESS
1386 && fHvEl2Supported)
1387 {
1388 /** @cfgm{/CPUM/NestedHWVirt, bool, false}
1389 * Whether to expose the hardware virtualization (EL2/VHE) feature to the guest.
1390 * The default is false. Only supported on M3 and later CPUs running macOS 15.0+ (Sequoia).
1391 */
1392 bool fNestedHWVirt = false;
1393 rc = CFGMR3QueryBoolDef(pCfgCpum, "NestedHWVirt", &fNestedHWVirt, false);
1394 AssertLogRelRCReturn(rc, rc);
1395 if (fNestedHWVirt)
1396 {
1397 hrc = hv_vm_config_set_el2_enabled(hVmCfg, fNestedHWVirt);
1398 if (hrc != HV_SUCCESS)
1399 return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
1400 "Cannot enable nested virtualization: hrc=%#x %s!\n", hrc, nemR3DarwinHvStatusName(hrc));
1401 pVM->nem.s.fEl2Enabled = true;
1402 LogRel(("NEM: Enabled nested virtualization (EL2) support\n"));
1403 }
1404 }
1405 else
1406 {
1407 /* Ensure nested virt is not set. */
1408 rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");
1409 AssertLogRelRC(rc);
1410
1411 LogRel(("NEM: The host doesn't supported nested virtualization! (hrc=%#x fHvEl2Supported=%RTbool)\n",
1412 hrc, fHvEl2Supported));
1413 }
1414 }
1415 else
1416 {
1417 /* Ensure nested virt is not set. */
1418 rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");
1419 AssertLogRelRC(rc);
1420
1421 LogRel(("NEM: Hypervisor.framework doesn't supported nested virtualization!\n"));
1422 }
1423
1424 hv_return_t hrc = hv_vm_create(hVmCfg);
1425 os_release(hVmCfg);
1426 if (hrc == HV_SUCCESS)
1427 {
1428 pVM->nem.s.fCreatedVm = true;
1429 pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();
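/* The guest runs off the host's generic timer, so the CNTFRQ_EL0 value read here is also the
 * counter frequency the guest will observe. */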
1430
1431 /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
1432 pVM->nem.s.u64VTimerOff = 0;
1433
1434 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
1435 Log(("NEM: Marked active!\n"));
1436 PGMR3EnableNemMode(pVM);
1437 return VINF_SUCCESS;
1438 }
1439
1440 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "hv_vm_create() failed: %#x %s", hrc, nemR3DarwinHvStatusName(hrc));
1441
1442 /*
1443 * We only fail if in forced mode, otherwise just log the complaint and return.
1444 */
1445 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
1446 if ( (fForced || !fFallback)
1447 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
1448 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1449
1450 if (RTErrInfoIsSet(pErrInfo))
1451 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
1452 return VINF_SUCCESS;
1453}
1454
1455
1456/**
1457 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
1458 *
1459 * @returns VBox status code
1460 * @param pVM The VM handle.
1461 * @param pVCpu The vCPU handle.
1462 * @param idCpu ID of the CPU to create.
1463 */
1464static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
1465{
1466 if (idCpu == 0)
1467 {
1468 Assert(pVM->nem.s.hVCpuCfg == NULL);
1469
1470 /* Create a new vCPU config and query the ID registers. */
1471 pVM->nem.s.hVCpuCfg = hv_vcpu_config_create();
1472 if (!pVM->nem.s.hVCpuCfg)
1473 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1474 "Call to hv_vcpu_config_create failed on vCPU %u", idCpu);
1475
1476 /* Query ID registers and hand them to CPUM. */
1477 CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
1478 for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
1479 {
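/* Each s_aIdRegs entry pairs an HV feature register with the byte offset of the corresponding
 * field in CPUMARMV8IDREGS, letting Hypervisor.framework write straight into the destination
 * field instead of requiring a switch on the register. */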
1480 uint64_t *pu64 = (uint64_t *)((uint8_t *)&IdRegs + s_aIdRegs[i].offIdStruct);
1481 hv_return_t hrc = hv_vcpu_config_get_feature_reg(pVM->nem.s.hVCpuCfg, s_aIdRegs[i].enmHvReg, pu64);
1482 if (hrc != HV_SUCCESS)
1483 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1484 "Call to hv_vcpu_get_feature_reg(, %#x, ) failed: %#x (%Rrc)", hrc, nemR3DarwinHvSts2Rc(hrc));
1485 }
1486
1487 int rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
1488 if (RT_FAILURE(rc))
1489 return rc;
1490 }
1491
1492 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, pVM->nem.s.hVCpuCfg);
1493 if (hrc != HV_SUCCESS)
1494 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1495 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1496
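/* Use the vCPU index as the MPIDR_EL1 affinity value so the guest sees a simple linear CPU
 * topology, matching the u64TgtCpu & 0xff handling in the PSCI CPU_ON path further down. */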
1497 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MPIDR_EL1, idCpu);
1498 if (hrc != HV_SUCCESS)
1499 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1500 "Setting MPIDR_EL1 failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1501
1502 return VINF_SUCCESS;
1503}
1504
1505
1506/**
1507 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
1508 *
1509 * @returns VBox status code.
1510 * @param pVM The VM handle.
1511 * @param pVCpu The vCPU handle.
1512 */
1513static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVM pVM, PVMCPU pVCpu)
1514{
1515 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1516 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1517
1518 if (pVCpu->idCpu == 0)
1519 {
1520 os_release(pVM->nem.s.hVCpuCfg);
1521 pVM->nem.s.hVCpuCfg = NULL;
1522 }
1523 return VINF_SUCCESS;
1524}
1525
1526
1527/**
1528 * This is called after CPUMR3Init is done.
1529 *
1530 * @returns VBox status code.
1531 * @param pVM The VM handle.
1532 */
1533int nemR3NativeInitAfterCPUM(PVM pVM)
1534{
1535 /*
1536 * Validate sanity.
1537 */
1538 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
1539 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1540
1541 /*
1542 * If the NEM variant of the GIC is configured, the GIC needs to be created here,
1543 * before any vCPU is created, according to the Apple docs.
1544 */
1545 if ( hv_gic_create
1546 && CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0"))
1547 {
1548 int rc = nemR3DarwinGicCreate(pVM);
1549 if (RT_FAILURE(rc))
1550 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Creating the GIC failed: %Rrc", rc);
1551 }
1552
1553 /*
1554 * Setup the EMTs.
1555 */
1556 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1557 {
1558 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1559
1560 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
1561 if (RT_FAILURE(rc))
1562 {
1563 /* Rollback. */
1564 while (idCpu--)
1565 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 2, pVM, pVM->apCpusR3[idCpu]);
1566
1567 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
1568 }
1569 }
1570
1571 pVM->nem.s.fCreatedEmts = true;
1572 return VINF_SUCCESS;
1573}
1574
1575
1576int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1577{
1578 RT_NOREF(pVM, enmWhat);
1579 return VINF_SUCCESS;
1580}
1581
1582
1583int nemR3NativeTerm(PVM pVM)
1584{
1585 /*
1586 * Delete the VM.
1587 */
1588
1589 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1590 {
1591 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1592
1593 /*
1594 * Apple's documentation states that the vCPU should be destroyed
1595 * on the thread running the vCPU but as all the other EMTs are gone
1596 * at this point, destroying the VM would hang.
1597 *
1598 * We seem to be in luck here though, as destroying apparently works
1599 * from EMT(0) as well.
1600 */
1601 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1602 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1603 }
1604
1605 pVM->nem.s.fCreatedEmts = false;
1606 if (pVM->nem.s.fCreatedVm)
1607 {
1608 hv_return_t hrc = hv_vm_destroy();
1609 if (hrc != HV_SUCCESS)
1610 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
1611
1612 pVM->nem.s.fCreatedVm = false;
1613 }
1614 return VINF_SUCCESS;
1615}
1616
1617
1618/**
1619 * VM reset notification.
1620 *
1621 * @param pVM The cross context VM structure.
1622 */
1623void nemR3NativeReset(PVM pVM)
1624{
1625 RT_NOREF(pVM);
1626}
1627
1628
1629/**
1630 * Reset CPU due to INIT IPI or hot (un)plugging.
1631 *
1632 * @param pVCpu The cross context virtual CPU structure of the CPU being
1633 * reset.
1634 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1635 */
1636void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1637{
1638 RT_NOREF(pVCpu, fInitIpi);
1639}
1640
1641
1642/**
1643 * Returns the byte size from the given access SAS value.
1644 *
1645 * @returns Number of bytes to transfer.
1646 * @param uSas The SAS value to convert.
1647 */
1648DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
1649{
1650 switch (uSas)
1651 {
1652 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1653 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1654 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1655 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1656 default:
1657 AssertReleaseFailed();
1658 }
1659
1660 return 0;
1661}
1662
1663
1664/**
1665 * Sets the given general purpose register to the given value.
1666 *
1667 * @param pVCpu The cross context virtual CPU structure of the
1668 * calling EMT.
1669 * @param uReg The register index.
1670 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1671 * @param fSignExtend Flag whether to sign extend the value.
1672 * @param u64Val The value.
1673 */
1674DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1675{
1676 AssertReturnVoid(uReg < 31);
1677
1678 if (f64BitReg)
1679 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1680 else
1681 pVCpu->cpum.GstCtx.aGRegs[uReg].x = (uint64_t)(fSignExtend ? (int32_t)u64Val : (uint32_t)u64Val);
1682
1683 /* Mark the register as not extern anymore. */
1684 switch (uReg)
1685 {
1686 case 0:
1687 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1688 break;
1689 case 1:
1690 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1691 break;
1692 case 2:
1693 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1694 break;
1695 case 3:
1696 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1697 break;
1698 default:
1699 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1700 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1701 }
1702}
1703
1704
1705/**
1706 * Gets the given general purpose register and returns the value.
1707 *
1708 * @returns Value from the given register.
1709 * @param pVCpu The cross context virtual CPU structure of the
1710 * calling EMT.
1711 * @param uReg The register index.
1712 */
1713DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1714{
1715 AssertReturn(uReg <= ARMV8_A64_REG_XZR, 0);
1716
1717 if (uReg == ARMV8_A64_REG_XZR)
1718 return 0;
1719
1720 /** @todo Import the register if extern. */
1721 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1722
1723 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1724}
1725
1726
1727/**
1728 * Works on the data abort exception (which will be an MMIO access most of the time).
1729 *
1730 * @returns VBox strict status code.
1731 * @param pVM The cross context VM structure.
1732 * @param pVCpu The cross context virtual CPU structure of the
1733 * calling EMT.
1734 * @param uIss The instruction specific syndrome value.
1735 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1736 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
1737 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
1738 */
1739static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
1740 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
1741{
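/* Decode the instruction specific syndrome: the ISV bit tells us whether the syndrome carries a
 * valid description of the access (register, size, sign extension). If it is clear we have to
 * fetch and decode the faulting instruction ourselves further down. */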
1742 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
1743 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
1744 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
1745 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
1746 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
1747 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
1748 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
1749 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
1750 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
1751 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
1752
1753 RT_NOREF(fL2Fault, GCPtrDataAbrt);
1754
1755 if (fWrite)
1756 {
1757 /*
1758 * Check whether this is one of the dirty tracked regions, mark it as dirty
1759 * and enable write support for this region again.
1760 *
1761 * This is required for proper VRAM tracking, or the display might not get updated.
1762 * The generic PGM facility can't be used because it operates on guest page sizes, while
1763 * setting protection flags with Hypervisor.framework works only on host page sized regions,
1764 * so we have to cook our own. Additionally the VRAM region is marked as prefetchable
1765 * (write-back), which doesn't produce a valid instruction syndrome, so we have to restart
1766 * the instruction after enabling write access again (due to a missing interpreter right now).
1767 */
1768 for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
1769 {
1770 PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
1771
1772 if ( GCPhysDataAbrt >= pMmio2Region->GCPhysStart
1773 && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
1774 {
1775 pMmio2Region->fDirty = true;
1776
1777 uint8_t u2State;
1778 int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
1779 NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);
1780
1781 /* Restart the instruction if there is no instruction syndrome available. */
1782 if (RT_FAILURE(rc) || !fIsv)
1783 return rc;
1784 }
1785 }
1786 }
1787
1788 VBOXSTRICTRC rcStrict;
1789 if (fIsv)
1790 {
1791 EMHistoryAddExit(pVCpu,
1792 fWrite
1793 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1794 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1795 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1796
1797 uint64_t u64Val = 0;
1798 if (fWrite)
1799 {
1800 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1801 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1802 Log4(("MmioExit/%u: %08RX64: WRITE %#RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
1803 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1804 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1805 }
1806 else
1807 {
1808 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1809 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1810 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1811 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1812 if (rcStrict == VINF_SUCCESS)
1813 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
1814 }
1815 }
1816 else
1817 {
1818 /** @todo Our UEFI firmware accesses the flash region with the following instruction
1819 * when the NVRAM actually contains data:
1820 * ldrb w9, [x6, #-0x0001]!
1821 * This is too complicated for the hardware so the ISV bit is not set. Until there
1822 * is a proper IEM implementation we just handle this here for now to avoid annoying
1823 * users too much.
1824 */
1825 /* The following ASSUMES that the vCPU state is completely synced. */
1826
1827 /* Read instruction. */
1828 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
1829 const void *pvPageR3 = NULL;
1830 PGMPAGEMAPLOCK PageMapLock;
1831
1832 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
1833 if (rcStrict == VINF_SUCCESS)
1834 {
1835 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
1836 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
1837
1838 DISSTATE Dis;
1839 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
1840 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
1841 if (rcStrict == VINF_SUCCESS)
1842 {
1843 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
1844 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
1845 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
1846 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
1847 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
1848 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
1849 {
1850 /* The fault address is already the final address. */
1851 uint8_t bVal = 0;
1852 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &bVal, 1, PGMACCESSORIGIN_HM);
1853 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1854 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, sizeof(bVal), sizeof(bVal),
1855 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
1856 if (rcStrict == VINF_SUCCESS)
1857 {
1858 nemR3DarwinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
1859 /* Update the indexed register. */
1860 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
1861 }
1862 }
1863 /*
1864 * Seeing the following with the Windows 11/ARM TPM driver:
1865 * %fffff800e5342888 48 25 45 29 ldp w8, w9, [x10, #+0x0028]
1866 */
1867 else if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDP
1868 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
1869 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
1870 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmReg
1871 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
1872 && Dis.aParams[2].armv8.enmType == kDisArmv8OpParmAddrInGpr
1873 && Dis.aParams[2].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit)
1874 {
1875 /** @todo This is tricky to handle if the first register read returns something other than VINF_SUCCESS... */
1876 /* The fault address is already the final address. */
1877 uint32_t u32Val1 = 0;
1878 uint32_t u32Val2 = 0;
1879 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u32Val1, sizeof(u32Val1), PGMACCESSORIGIN_HM);
1880 if (rcStrict == VINF_SUCCESS)
1881 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt + sizeof(uint32_t), &u32Val2, sizeof(u32Val2), PGMACCESSORIGIN_HM);
1882 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs %.*Rhxs rcStrict=%Rrc\n",
1883 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, 2 * sizeof(uint32_t), sizeof(u32Val1),
1884 &u32Val1, sizeof(u32Val2), &u32Val2, VBOXSTRICTRC_VAL(rcStrict) ));
1885 if (rcStrict == VINF_SUCCESS)
1886 {
1887 nemR3DarwinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val1);
1888 nemR3DarwinSetGReg(pVCpu, Dis.aParams[1].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val2);
1889 }
1890 }
1891 /* T O D O:
1892 * Recent W11:
1893 * x0=ffffb804ea3217d8 x1=ffffe28437802000 x2=0000000000000424 x3=fffff802e5716030
1894 * x4=ffffe28437802424 x5=ffffb804ea321bfc x6=000000000080009c x7=000000000080009c
1895 * x8=ffff87849fefc788 x9=ffff87849fefc788 x10=000000000000001c x11=ffffb804ea32909c
1896 * x12=000000000000001c x13=000000000000009c x14=ffffb804ea3290a8 x15=ffffd580b2b1f7d8
1897 * x16=0000f6999080cdbe x17=0000f6999080cdbe x18=ffffd08158fbf000 x19=ffffb804ea3217d0
1898 * x20=0000000000000001 x21=0000000000000004 x22=ffffb804ea321660 x23=000047fb15cdefd8
1899 * x24=0000000000000000 x25=ffffb804ea2f1080 x26=0000000000000000 x27=0000000000000380
1900 * x28=0000000000000000 x29=ffff87849fefc7e0 x30=fffff802e57120b0
1901 * pc=fffff802e5713c20 pstate=00000000a0001344
1902 * sp_el0=ffff87849fefc7e0 sp_el1=ffff87849e462400 elr_el1=fffff802e98889c8
1903 * pl061gpio!start_seg1_.text+0x2c20:
1904 * %fffff802e5713c20 23 00 c0 3d ldr q3, [x1]
1905 * VBoxDbg> format %%(%@x1)
1906 * Guest physical address: %%ffddd000
1907 * VBoxDbg> info mmio
1908 * MMIO registrations: 12 (186 allocated)
1909 * ## Ctx Size Mapping PCI Description
1910 * 0 R3 00000000000c0000 0000000004000000-00000000040bffff Flash Memory
1911 * [snip]
1912 * 11 R3 0000000000001000 00000000ffddd000-00000000ffdddfff PL061
1913 */
1914 else
1915 AssertLogRelMsgFailedReturn(("pc=%#RX64: %#x opcode=%d\n",
1916 pVCpu->cpum.GstCtx.Pc.u64, Dis.Instr.au32[0], Dis.pCurInstr->uOpcode),
1917 VERR_NEM_IPE_2);
1918 }
1919 }
1920 }
1921
1922 if (rcStrict == VINF_SUCCESS)
1923 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1924
1925 return rcStrict;
1926}
1927
1928
1929/**
1930 * Works on the trapped MRS, MSR and system instruction exception.
1931 *
1932 * @returns VBox strict status code.
1933 * @param pVM The cross context VM structure.
1934 * @param pVCpu The cross context virtual CPU structure of the
1935 * calling EMT.
1936 * @param uIss The instruction specific syndrome value.
1937 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1938 */
1939static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
1940{
1941 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
1942 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
1943 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
1944 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
1945 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
1946 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
1947 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
1948 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
1949 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
1950 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
1951
1952 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
1953 EMHistoryAddExit(pVCpu,
1954 fRead
1955 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1956 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1957 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1958
1959 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1960 uint64_t u64Val = 0;
1961 if (fRead)
1962 {
1963 RT_NOREF(pVM);
1964 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
1965 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
1966 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1967 VBOXSTRICTRC_VAL(rcStrict) ));
1968 if (rcStrict == VINF_SUCCESS)
1969 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
1970 }
1971 else
1972 {
1973 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1974 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
1975 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
1976 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1977 VBOXSTRICTRC_VAL(rcStrict) ));
1978 }
1979
1980 if (rcStrict == VINF_SUCCESS)
1981 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1982
1983 return rcStrict;
1984}
1985
1986
1987/**
1988 * Works on the trapped HVC instruction exception.
1989 *
1990 * @returns VBox strict status code.
1991 * @param pVM The cross context VM structure.
1992 * @param pVCpu The cross context virtual CPU structure of the
1993 * calling EMT.
1994 * @param uIss The instruction specific syndrome value.
1995 * @param fAdvancePc Flag whether to advance the guest program counter.
1996 */
1997static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fAdvancePc = false)
1998{
1999 uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
2000 LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));
2001
2002#if 0 /** @todo For later */
2003 EMHistoryAddExit(pVCpu,
2004 fRead
2005 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
2006 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
2007 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
2008#endif
2009
2010 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2011 if (u16Imm == 0)
2012 {
2013 /** @todo Raise exception to EL1 if PSCI not configured. */
2014 /** @todo Need a generic mechanism here to pass this to; GIM maybe? */
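/* SMCCC function identifier layout: bit 31 = fast call, bit 30 = 64-bit calling convention,
 * bits 29:24 = owning entity (ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE covers PSCI),
 * bits 15:0 = function number. */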
2015 uint32_t uFunId = pVCpu->cpum.GstCtx.aGRegs[ARMV8_A64_REG_X0].w;
2016 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
2017 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
2018 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
2019 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
2020 {
2021 switch (uFunNum)
2022 {
2023 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2024 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
2025 break;
2026 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2027 rcStrict = VMR3PowerOff(pVM->pUVM);
2028 break;
2029 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2030 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2031 {
2032 bool fHaltOnReset;
2033 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
2034 if (RT_SUCCESS(rc) && fHaltOnReset)
2035 {
2036 Log(("nemR3DarwinHandleExitExceptionTrappedHvcInsn: Halt On Reset!\n"));
2037 rcStrict = VINF_EM_HALT;
2038 }
2039 else
2040 {
2041 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
2042 VM_FF_SET(pVM, VM_FF_RESET);
2043 rcStrict = VINF_EM_RESET;
2044 }
2045 break;
2046 }
2047 case ARM_PSCI_FUNC_ID_CPU_ON:
2048 {
2049 uint64_t u64TgtCpu = nemR3DarwinGetGReg(pVCpu, ARMV8_A64_REG_X1);
2050 RTGCPHYS GCPhysExecAddr = nemR3DarwinGetGReg(pVCpu, ARMV8_A64_REG_X2);
2051 uint64_t u64CtxId = nemR3DarwinGetGReg(pVCpu, ARMV8_A64_REG_X3);
2052 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
2053 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
2054 break;
2055 }
2056 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
2057 {
2058 uint32_t u32FunNum = (uint32_t)nemR3DarwinGetGReg(pVCpu, ARMV8_A64_REG_X1);
2059 switch (u32FunNum)
2060 {
2061 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2062 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2063 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2064 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2065 case ARM_PSCI_FUNC_ID_CPU_ON:
2066 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
2067 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2068 false /*f64BitReg*/, false /*fSignExtend*/,
2069 (uint64_t)ARM_PSCI_STS_SUCCESS);
2070 break;
2071 default:
2072 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2073 false /*f64BitReg*/, false /*fSignExtend*/,
2074 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2075 }
2076 break;
2077 }
2078 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
2079 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_MIGRATE_INFO_TYPE_TOS_NOT_PRESENT);
2080 break;
2081 default:
2082 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2083 }
2084 }
2085 else
2086 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2087 }
2088
2089 /** @todo What to do if immediate is != 0? */
2090
2091 if ( rcStrict == VINF_SUCCESS
2092 && fAdvancePc)
2093 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
2094
2095 return rcStrict;
2096}
2097
2098
2099/**
2100 * Handles an exception VM exit.
2101 *
2102 * @returns VBox strict status code.
2103 * @param pVM The cross context VM structure.
2104 * @param pVCpu The cross context virtual CPU structure of the
2105 * calling EMT.
2106 * @param pExit Pointer to the exit information.
2107 */
2108static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
2109{
2110 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
2111 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
2112 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
2113
2114 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
2115 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
2116
2117 switch (uEc)
2118 {
2119 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
2120 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
2121 pExit->exception.physical_address);
2122 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
2123 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
2124 case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
2125 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
2126 case ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN:
2127 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss, true);
2128 case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
2129 {
2130 /* No need to halt if there is an interrupt pending already. */
2131 if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
2132 {
2133 LogFlowFunc(("IRQ | FIQ set => VINF_SUCCESS\n"));
2134 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2135 return VINF_SUCCESS;
2136 }
2137
2138 /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
2139 if ( (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
2140 && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
2141 {
2142 uint64_t cTicksVTimer = mach_absolute_time() - pVM->nem.s.u64VTimerOff;
2143
2144 /* Check whether it expired and start executing guest code. */
2145 if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
2146 {
2147 LogFlowFunc(("Guest timer expired (cTicksVTimer=%RU64 CntvCValEl0=%RU64) => VINF_SUCCESS\n",
2148 cTicksVTimer, pVCpu->cpum.GstCtx.CntvCValEl0));
2149 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2150 return VINF_SUCCESS;
2151 }
2152
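/* Convert the remaining ticks to nanoseconds: ns = ticks * RT_NS_1SEC / CNTFRQ_EL0.
 * ASMMultU64ByU32DivByU32 keeps the full intermediate product, so the multiplication
 * by RT_NS_1SEC can't overflow. */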
2153 uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
2154 uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);
2155
2156 /*
2157 * Our halt method doesn't work with sub-millisecond granularity at the moment, causing a huge slowdown
2158 * and scheduling overhead which would increase the wakeup latency.
2159 * So we only halt when the remaining time exceeds the threshold (this needs more experimentation,
2160 * but 2ms turned out to be a good compromise between CPU load when the guest is idle and performance).
2161 */
2162 if (cNanoSecsVTimerToExpire < 2 * RT_NS_1MS)
2163 {
2164 LogFlowFunc(("Guest timer expiration < 2ms (cNanoSecsVTimerToExpire=%RU64) => VINF_SUCCESS\n",
2165 cNanoSecsVTimerToExpire));
2166 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2167 return VINF_SUCCESS;
2168 }
2169
2170 LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
2171 cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVM->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
2172 TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
2173 }
2174 else
2175 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
2176
2177 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2178 return VINF_EM_HALT;
2179 }
2180 case ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN:
2181 {
2182 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
2183 /** @todo Forward genuine guest traps to the guest by either single stepping the instruction with debug exception trapping turned off
2184 * or creating an instruction interpreter and injecting the exception ourselves. */
2185 Assert(rcStrict == VINF_EM_DBG_BREAKPOINT);
2186 return rcStrict;
2187 }
2188 case ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL:
2189 return VINF_EM_DBG_STEPPED;
2190 case ARMV8_ESR_EL2_EC_UNKNOWN:
2191 default:
2192 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
2193 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
2194 AssertReleaseFailed();
2195 return VERR_NOT_IMPLEMENTED;
2196 }
2197
2198 return VINF_SUCCESS;
2199}
2200
2201
2202/**
2203 * Handles an exit from hv_vcpu_run().
2204 *
2205 * @returns VBox strict status code.
2206 * @param pVM The cross context VM structure.
2207 * @param pVCpu The cross context virtual CPU structure of the
2208 * calling EMT.
2209 */
2210static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
2211{
2212 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
2213 if (RT_FAILURE(rc))
2214 return rc;
2215
2216#ifdef LOG_ENABLED
2217 if (LogIs3Enabled())
2218 nemR3DarwinLogState(pVM, pVCpu);
2219#endif
2220
2221 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
2222 switch (pExit->reason)
2223 {
2224 case HV_EXIT_REASON_CANCELED:
2225 return VINF_EM_RAW_INTERRUPT;
2226 case HV_EXIT_REASON_EXCEPTION:
2227 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
2228 case HV_EXIT_REASON_VTIMER_ACTIVATED:
2229 {
2230 LogFlowFunc(("vTimer got activated\n"));
2231 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
2232 pVCpu->nem.s.fVTimerActivated = true;
2233 return PDMGicSetPpi(pVCpu, pVM->nem.s.u32GicPpiVTimer, true /*fAsserted*/);
2234 }
2235 default:
2236 AssertReleaseFailed();
2237 break;
2238 }
2239
2240 return VERR_INVALID_STATE;
2241}
2242
2243
2244/**
2245 * Runs the guest once until an exit occurs.
2246 *
2247 * @returns HV status code.
2248 * @param pVM The cross context VM structure.
2249 * @param pVCpu The cross context virtual CPU structure.
2250 */
2251static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
2252{
2253 TMNotifyStartOfExecution(pVM, pVCpu);
2254
2255 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
2256
2257 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2258
2259 return hrc;
2260}
2261
2262
2263/**
2264 * Prepares the VM to run the guest.
2265 *
2266 * @returns Strict VBox status code.
2267 * @param pVM The cross context VM structure.
2268 * @param pVCpu The cross context virtual CPU structure.
2269 * @param fSingleStepping Flag whether we run in single stepping mode.
2270 */
2271static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
2272{
2273#ifdef LOG_ENABLED
2274 bool fIrq = false;
2275 bool fFiq = false;
2276
2277 if (LogIs3Enabled())
2278 nemR3DarwinLogState(pVM, pVCpu);
2279#endif
2280
2281 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
2282 AssertRCReturn(rc, rc);
2283
2284 /* In single stepping mode we will re-read SPSR and MDSCR and enable the software step bits. */
2285 if (fSingleStepping)
2286 {
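/* ARMv8 software stepping requires both MDSCR_EL1.SS (enables the step state machine) and
 * PSTATE.SS, set here via the SPSR so that exactly one instruction retires before the step
 * exception is taken. */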
2287 uint64_t u64Tmp;
2288 hv_return_t hrc = hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
2289 if (hrc == HV_SUCCESS)
2290 {
2291 u64Tmp |= ARMV8_SPSR_EL2_AARCH64_SS;
2292 hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, u64Tmp);
2293 }
2294
2295 hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MDSCR_EL1, &u64Tmp);
2296 if (hrc == HV_SUCCESS)
2297 {
2298 u64Tmp |= ARMV8_MDSCR_EL1_AARCH64_SS;
2299 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MDSCR_EL1, u64Tmp);
2300 }
2301
2302 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2303 }
2304
2305 /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
2306 if (pVCpu->nem.s.fVTimerActivated)
2307 {
2308 /* Read the CNTV_CTL_EL0 register. */
2309 uint64_t u64CntvCtl = 0;
2310
2311 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
2312 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2313
2314 if ( (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2315 != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2316 {
2317 /* Clear the interrupt. */
2318 PDMGicSetPpi(pVCpu, pVM->nem.s.u32GicPpiVTimer, false /*fAsserted*/);
2319
2320 pVCpu->nem.s.fVTimerActivated = false;
2321 hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
2322 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2323 }
2324 }
2325
2326 /* Set the pending interrupt state. */
2327 hv_return_t hrc = HV_SUCCESS;
2328 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
2329 {
2330 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
2331 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2332#ifdef LOG_ENABLED
2333 fIrq = true;
2334#endif
2335 }
2336 else
2337 {
2338 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
2339 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2340 }
2341
2342 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
2343 {
2344 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
2345 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2346#ifdef LOG_ENABLED
2347 fFiq = true;
2348#endif
2349 }
2350 else
2351 {
2352 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
2353 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2354 }
2355
2356 LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
2357 pVCpu->nem.s.fEventPending = false;
2358 return VINF_SUCCESS;
2359}
2360
2361
2362/**
2363 * The normal runloop (no debugging features enabled).
2364 *
2365 * @returns Strict VBox status code.
2366 * @param pVM The cross context VM structure.
2367 * @param pVCpu The cross context virtual CPU structure.
2368 */
2369static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
2370{
2371 /*
2372 * The run loop.
2373 *
2374 * The current approach to state updating is to use the sledgehammer and sync
2375 * everything every time. This will be optimized later.
2376 */
2377
2378 /* Update the vTimer offset after resuming if instructed. */
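/* The offset makes the guest's virtual counter continue from where it stopped, instead of
 * jumping ahead by the host time that passed while the VM was saved or suspended. */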
2379 if (pVCpu->nem.s.fVTimerOffUpdate)
2380 {
2381 hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2382 if (hrc != HV_SUCCESS)
2383 return nemR3DarwinHvSts2Rc(hrc);
2384
2385 pVCpu->nem.s.fVTimerOffUpdate = false;
2386
2387 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2388 if (hrc == HV_SUCCESS)
2389 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2390 if (hrc != HV_SUCCESS)
2391 return nemR3DarwinHvSts2Rc(hrc);
2392 }
2393
2394 /*
2395 * Poll timers and run for a bit.
2396 */
2397 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2398 * the whole polling job when timers have changed... */
2399 uint64_t offDeltaIgnored;
2400 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2401 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2402 for (unsigned iLoop = 0;; iLoop++)
2403 {
2404 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
2405 if (rcStrict != VINF_SUCCESS)
2406 break;
2407
2408 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2409 if (hrc == HV_SUCCESS)
2410 {
2411 /*
2412 * Deal with the message.
2413 */
2414 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2415 if (rcStrict == VINF_SUCCESS)
2416 { /* hopefully likely */ }
2417 else
2418 {
2419 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2420 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2421 break;
2422 }
2423 }
2424 else
2425 {
2426 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
2427 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2428 }
2429 } /* the run loop */
2430
2431 return rcStrict;
2432}
2433
2434
2435/**
2436 * The debug runloop.
2437 *
2438 * @returns Strict VBox status code.
2439 * @param pVM The cross context VM structure.
2440 * @param pVCpu The cross context virtual CPU structure.
2441 */
2442static VBOXSTRICTRC nemR3DarwinRunGuestDebug(PVM pVM, PVMCPU pVCpu)
2443{
2444 /*
2445 * The run loop.
2446 *
2447 * The current approach to state updating is to use the sledgehammer and sync
2448 * everything every time. This will be optimized later.
2449 */
2450
2451 bool const fSavedSingleInstruction = pVCpu->nem.s.fSingleInstruction;
2452 pVCpu->nem.s.fSingleInstruction = pVCpu->nem.s.fSingleInstruction || DBGFIsStepping(pVCpu);
2453 pVCpu->nem.s.fUsingDebugLoop = true;
2454
2455 /* Trap any debug exceptions. */
2456 hv_return_t hrc = hv_vcpu_set_trap_debug_exceptions(pVCpu->nem.s.hVCpu, true);
2457 if (hrc != HV_SUCCESS)
2458 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2459 "Trapping debug exceptions on vCPU %u failed: %#x (%Rrc)", pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2460
2461 /* Update the vTimer offset after resuming if instructed. */
2462 if (pVCpu->nem.s.fVTimerOffUpdate)
2463 {
2464 hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2465 if (hrc != HV_SUCCESS)
2466 return nemR3DarwinHvSts2Rc(hrc);
2467
2468 pVCpu->nem.s.fVTimerOffUpdate = false;
2469
2470 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2471 if (hrc == HV_SUCCESS)
2472 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2473 if (hrc != HV_SUCCESS)
2474 return nemR3DarwinHvSts2Rc(hrc);
2475 }
2476
2477 /* Save the guest MDSCR_EL1 */
2478 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_DEBUG | CPUMCTX_EXTRN_PSTATE);
2479 uint64_t u64RegMdscrEl1 = pVCpu->cpum.GstCtx.Mdscr.u64;
2480
2481 /*
2482 * Poll timers and run for a bit.
2483 */
2484 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2485 * the whole polling job when timers have changed... */
2486 uint64_t offDeltaIgnored;
2487 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2488 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2489 for (unsigned iLoop = 0;; iLoop++)
2490 {
2491 bool const fStepping = pVCpu->nem.s.fSingleInstruction;
2492
2493 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, fStepping);
2494 if (rcStrict != VINF_SUCCESS)
2495 break;
2496
2497 hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2498 if (hrc == HV_SUCCESS)
2499 {
2500 /*
2501 * Deal with the message.
2502 */
2503 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2504 if (rcStrict == VINF_SUCCESS)
2505 { /* hopefully likely */ }
2506 else
2507 {
2508 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2509 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2510 break;
2511 }
2512 }
2513 else
2514 {
2515 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
2516 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2517 }
2518 } /* the run loop */
2519
2520 /* Restore single stepping state. */
2521 if (pVCpu->nem.s.fSingleInstruction)
2522 {
2523 /** @todo This ASSUMES that guest code being single stepped is not modifying the MDSCR_EL1 register. */
2524 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_DEBUG | CPUMCTX_EXTRN_PSTATE);
2525 Assert(pVCpu->cpum.GstCtx.Mdscr.u64 & ARMV8_MDSCR_EL1_AARCH64_SS);
2526
2527 pVCpu->cpum.GstCtx.Mdscr.u64 = u64RegMdscrEl1;
2528 }
2529
2530 /* Restore debug exceptions trapping. */
2531 hrc |= hv_vcpu_set_trap_debug_exceptions(pVCpu->nem.s.hVCpu, false);
2532 if (hrc != HV_SUCCESS)
2533 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2534 "Clearing trapping of debug exceptions on vCPU %u failed: %#x (%Rrc)", pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2535
2536 pVCpu->nem.s.fUsingDebugLoop = false;
2537 pVCpu->nem.s.fSingleInstruction = fSavedSingleInstruction;
2538
2539 return rcStrict;
2540
2541}
2542
2543
2544VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2545{
2546#ifdef LOG_ENABLED
2547 if (LogIs3Enabled())
2548 nemR3DarwinLogState(pVM, pVCpu);
2549#endif
2550
2551 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
2552
2553 if (RT_UNLIKELY(!pVCpu->nem.s.fIdRegsSynced))
2554 {
2555 /*
2556 * Sync the guest ID registers, which are per VM, once (they are read-only and stay constant over the VM's lifetime).
2557 * This needs to be done here and not during init because loading a saved state might change the ID registers from what
2558 * was done in the call to CPUMR3PopulateFeaturesByIdRegisters().
2559 */
2560 static const struct
2561 {
2562 const char *pszIdReg;
2563 hv_sys_reg_t enmHvReg;
2564 uint32_t offIdStruct;
2565 } s_aSysIdRegs[] =
2566 {
2567#define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMARMV8IDREGS, a_CpumIdReg) }
2568 ID_SYS_REG_CREATE(ID_AA64DFR0_EL1, u64RegIdAa64Dfr0El1),
2569 ID_SYS_REG_CREATE(ID_AA64DFR1_EL1, u64RegIdAa64Dfr1El1),
2570 ID_SYS_REG_CREATE(ID_AA64ISAR0_EL1, u64RegIdAa64Isar0El1),
2571 ID_SYS_REG_CREATE(ID_AA64ISAR1_EL1, u64RegIdAa64Isar1El1),
2572 ID_SYS_REG_CREATE(ID_AA64MMFR0_EL1, u64RegIdAa64Mmfr0El1),
2573 ID_SYS_REG_CREATE(ID_AA64MMFR1_EL1, u64RegIdAa64Mmfr1El1),
2574 ID_SYS_REG_CREATE(ID_AA64MMFR2_EL1, u64RegIdAa64Mmfr2El1),
2575 ID_SYS_REG_CREATE(ID_AA64PFR0_EL1, u64RegIdAa64Pfr0El1),
2576 ID_SYS_REG_CREATE(ID_AA64PFR1_EL1, u64RegIdAa64Pfr1El1),
2577#undef ID_SYS_REG_CREATE
2578 };
2579
2580 PCCPUMARMV8IDREGS pIdRegsGst = NULL;
2581 int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
2582 AssertRCReturn(rc, rc);
2583
2584 for (uint32_t i = 0; i < RT_ELEMENTS(s_aSysIdRegs); i++)
2585 {
2586 uint64_t *pu64 = (uint64_t *)((uint8_t *)pIdRegsGst + s_aSysIdRegs[i].offIdStruct);
2587 hv_return_t hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aSysIdRegs[i].enmHvReg, *pu64);
2588 if (hrc != HV_SUCCESS)
2589 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2590 "Setting %s failed on vCPU %u: %#x (%Rrc)", s_aSysIdRegs[i].pszIdReg, pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2591 }
2592
2593 pVCpu->nem.s.fIdRegsSynced = true;
2594 }
2595
2596 /*
2597 * Try switch to NEM runloop state.
2598 */
2599 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2600 { /* likely */ }
2601 else
2602 {
2603 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2604 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2605 return VINF_SUCCESS;
2606 }
2607
2608 VBOXSTRICTRC rcStrict;
2609 if ( !pVCpu->nem.s.fUseDebugLoop
2610 /*&& !nemR3DarwinAnyExpensiveProbesEnabled()*/
2611 && !DBGFIsStepping(pVCpu)
2612 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledSwBreakpoints)
2613 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
2614 else
2615 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
2616
2617 if (rcStrict == VINF_EM_RAW_TO_R3)
2618 rcStrict = VINF_SUCCESS;
2619
2620 /*
2621 * Convert any pending HM events back to TRPM due to premature exits.
2622 *
2623 * This is because execution may continue from IEM and we would need to inject
2624 * the event from there (hence place it back in TRPM).
2625 */
2626 if (pVCpu->nem.s.fEventPending)
2627 {
2628 /** @todo */
2629 }
2630
2631
2632 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2633 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2634
2635 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
2636 {
2637 /* Try to anticipate what we might need. */
2638 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
2639 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2640 || RT_FAILURE(rcStrict))
2641 fImport = CPUMCTX_EXTRN_ALL;
2642 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
2643 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
2644 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2645
2646 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2647 {
2648 /* Only import what is external currently. */
2649 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
2650 if (RT_SUCCESS(rc2))
2651 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2652 else if (RT_SUCCESS(rcStrict))
2653 rcStrict = rc2;
2654 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2655 pVCpu->cpum.GstCtx.fExtrn = 0;
2656 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2657 }
2658 else
2659 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2660 }
2661 else
2662 {
2663 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2664 pVCpu->cpum.GstCtx.fExtrn = 0;
2665 }
2666
2667 return rcStrict;
2668}
2669
2670
2671VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2672{
2673 RT_NOREF(pVM, pVCpu);
2674 return true; /** @todo Are there any cases where we have to emulate? */
2675}
2676
2677
2678bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2679{
2680 VMCPU_ASSERT_EMT(pVCpu);
2681 bool fOld = pVCpu->nem.s.fSingleInstruction;
2682 pVCpu->nem.s.fSingleInstruction = fEnable;
2683 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
2684 return fOld;
2685}
2686
2687
2688void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2689{
2690 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
2691
2692 RT_NOREF(pVM, fFlags);
2693
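/* Kick the vCPU out of hv_vcpu_run(); the run loop will see HV_EXIT_REASON_CANCELED and
 * return VINF_EM_RAW_INTERRUPT to the caller. */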
2694 hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
2695 if (hrc != HV_SUCCESS)
2696 LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
2697}
2698
2699
2700DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2701{
2702 RT_NOREF(pVM, fUseDebugLoop);
2703 //AssertReleaseFailed();
2704 return false;
2705}
2706
2707
2708DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2709{
2710 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2711 return fUseDebugLoop;
2712}
2713
2714
2715VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2716 uint8_t *pu2State, uint32_t *puNemRange)
2717{
2718 RT_NOREF(pVM, puNemRange);
2719
2720 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
2721#if defined(VBOX_WITH_PGM_NEM_MODE)
2722 if (pvR3)
2723 {
2724 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2725 if (RT_FAILURE(rc))
2726 {
2727 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
2728 return VERR_NEM_MAP_PAGES_FAILED;
2729 }
2730 }
2731 return VINF_SUCCESS;
2732#else
2733 RT_NOREF(pVM, GCPhys, cb, pvR3);
2734 return VERR_NEM_MAP_PAGES_FAILED;
2735#endif
2736}
2737
2738
2739VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2740{
2741 RT_NOREF(pVM);
2742 return true;
2743}
2744
2745
2746VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2747 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2748{
2749 RT_NOREF(pvRam);
2750
2751 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
2752 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
2753
2754#if defined(VBOX_WITH_PGM_NEM_MODE)
2755 /*
2756 * Unmap the RAM we're replacing.
2757 */
2758 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2759 {
2760 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2761 if (RT_SUCCESS(rc))
2762 { /* likely */ }
2763 else if (pvMmio2)
2764 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rc(ignored)\n",
2765 GCPhys, cb, fFlags, rc));
2766 else
2767 {
2768 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2769 GCPhys, cb, fFlags, rc));
2770 return VERR_NEM_UNMAP_PAGES_FAILED;
2771 }
2772 }
2773
2774 /*
2775 * Map MMIO2 if any.
2776 */
2777 if (pvMmio2)
2778 {
2779 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
2780
2781 /* We need to set up our own dirty tracking because Hypervisor.framework can set protection flags only on host page size aligned regions. */
2782 uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
2783 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2784 {
2785 /* Find a slot for dirty tracking. */
2786 PNEMHVMMIO2REGION pMmio2Region = NULL;
2787 uint32_t idSlot;
2788 for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
2789 {
2790 if ( pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
2791 && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
2792 {
2793 pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
2794 break;
2795 }
2796 }
2797
2798 if (!pMmio2Region)
2799 {
2800 LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
2801 return VERR_NEM_MAP_PAGES_FAILED;
2802 }
2803
2804 pMmio2Region->GCPhysStart = GCPhys;
2805 pMmio2Region->GCPhysLast = GCPhys + cb - 1;
2806 pMmio2Region->fDirty = false;
2807 *puNemRange = idSlot;
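/* Note: NEM_PAGE_PROT_WRITE is deliberately left out of fProt here, so the first guest write
 * faults into nemR3DarwinHandleExitExceptionDataAbort() which marks the region dirty and
 * re-enables write access. */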
2808 }
2809 else
2810 fProt |= NEM_PAGE_PROT_WRITE;
2811
2812 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
2813 if (RT_FAILURE(rc))
2814 {
2815 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
2816 GCPhys, cb, fFlags, pvMmio2, rc));
2817 return VERR_NEM_MAP_PAGES_FAILED;
2818 }
2819 }
2820 else
2821 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
2822
2823#else
2824 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
2825 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
2826#endif
2827 return VINF_SUCCESS;
2828}
2829
2830
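/** @note How the MMIO2 dirty tracking above is meant to pay off: the region is
 *        mapped without NEM_PAGE_PROT_WRITE, so the first guest write faults.
 *        A fault-side lookup (hypothetical sketch; the real exit handling
 *        lives elsewhere in this backend, and GCPhysFault is an assumed input)
 *        would be along these lines:
 * @code
 *      for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
 *      {
 *          PNEMHVMMIO2REGION pRegion = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
 *          if (   pRegion->GCPhysLast != 0
 *              && GCPhysFault >= pRegion->GCPhysStart
 *              && GCPhysFault <= pRegion->GCPhysLast)
 *          {
 *              pRegion->fDirty = true; // stays set until the next dirty bitmap query
 *              // ...re-map the region writable so subsequent writes run at full speed...
 *              break;
 *          }
 *      }
 * @endcode
 */

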
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    int rc = VINF_SUCCESS;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                     GCPhys, cb, fFlags, rc));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
        }

        if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
        {
            /* Reset tracking structure. */
            uint32_t idSlot = *puNemRange;
            *puNemRange = UINT32_MAX;

            Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
            pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
            pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast  = 0;
            pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty      = false;
        }
    }

    /* Ensure the page is marked as unmapped if relevant. */
    Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);

    /*
     * Restore the RAM we replaced.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        AssertPtr(pvRam);
        rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvRam=%p rc=%Rrc\n", GCPhys, cb, pvRam, rc));
            rc = VERR_NEM_MAP_PAGES_FAILED;
        }
    }

    RT_NOREF(pvMmio2);
#else
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
    if (pu2State)
        *pu2State = UINT8_MAX;
    rc = VERR_NEM_UNMAP_PAGES_FAILED;
#endif
    return rc;
}


VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    LogFlowFunc(("NEMR3PhysMmio2QueryAndResetDirtyBitmap: %RGp LB %RGp uNemRange=%u\n", GCPhys, cb, uNemRange));
    Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));

    /* Keep it simple for now and mark the whole range dirty if the region was written to at all. */
    int rc = VINF_SUCCESS;
    if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
    {
        ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);

        pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
        /* Restore as RX only. */
        uint8_t u2State;
        rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
    }
    else
        ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);

    return rc;
}


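/** @note Since the tracking is all-or-nothing per region, every page in the
 *        bitmap comes back dirty after any write.  An illustrative consumer
 *        (buffer sizing and variable names are assumptions, not an existing
 *        caller):
 * @code
 *      uint64_t au64Bitmap[32]; // covers 32 * 64 guest pages
 *      int rc = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, GCPhys, cb, uNemRange,
 *                                                      au64Bitmap, sizeof(au64Bitmap));
 *      if (RT_SUCCESS(rc))
 *          for (uint32_t iPage = 0; iPage < cb / GUEST_PAGE_SIZE; iPage++)
 *              if (ASMBitTest(au64Bitmap, iPage))
 *                  ; // sync the page at GCPhys + iPage * GUEST_PAGE_SIZE
 * @endcode
 */

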
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);

    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
                                                   uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
    *pu2State = UINT8_MAX;

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * (Re-)map read-only.
     */
    AssertPtrReturn(pvPages, VERR_INVALID_POINTER);

    int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
    AssertRC(rc);

    rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
    if (RT_FAILURE(rc))
    {
        LogRel(("NEMR3NotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
                GCPhys, cb, pvPages, fFlags, rc));
        return VERR_NEM_MAP_PAGES_FAILED;
    }
    RT_NOREF(fFlags, puNemRange);
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvMemR3)
    {
        /* Unregister what was there before. */
        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        AssertRC(rc);

        rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
                               pvMemR3, GCPhys, cb, rc));
    }
    RT_NOREF(enmKind);
#else
    RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
    AssertFailed();
#endif
}


VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
    RT_NOREF(pVCpu, fEnabled);
}


void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
}


int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);

    AssertFailed();
    return VINF_SUCCESS;
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
                                                  PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                              RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);

    AssertFailed();
}


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

    return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
}


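/** @note A typical (illustrative) call site pulls in only what it needs; here
 *        everything is requested, which is always valid:
 * @code
 *      int rc = NEMImportStateOnDemand(pVCpu, CPUMCTX_EXTRN_ALL);
 *      AssertRCReturn(rc, rc);
 * @endcode
 *        Passing a narrower CPUMCTX_EXTRN_XXX mask limits the copy to the
 *        corresponding registers.
 */

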
/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   pcTicks Where to return the CPU tick count.
 * @param   puAux   Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    LogFlowFunc(("pVCpu=%p pcTicks=%p puAux=%p\n", pVCpu, pcTicks, puAux));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

    if (puAux)
        *puAux = 0;
    *pcTicks = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
    return VINF_SUCCESS;
}


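/** @note Worked example with made-up numbers: if mach_absolute_time() returns
 *        1000000 and u64VTimerOff is 400000, the guest sees a counter value of
 *        600000.  The offset is what makes the guest counter start near zero
 *        at VM boot and skip over host time that elapsed while the VM was
 *        paused.
 */

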
/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    /*
     * Calculate the new offset: first get the current TSC value using the old vTimer offset,
     * then adjust the offset so the guest doesn't notice the pause.
     */
    uint64_t u64TscNew = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff;
    Assert(u64TscNew >= uPausedTscValue);
    LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
                 pVM->nem.s.u64VTimerOff, u64TscNew, uPausedTscValue,
                 pVM->nem.s.u64VTimerOff + (u64TscNew - uPausedTscValue)));

    pVM->nem.s.u64VTimerOff += u64TscNew - uPausedTscValue;

    /*
     * Set the flag to update the vTimer offset when the vCPU resumes for the first time
     * (needs to be done on the actual EMT).
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
        pVCpuDst->nem.s.fVTimerOffUpdate = true;
    }

    return VINF_SUCCESS;
}


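/** @note The pause compensation above in a nutshell (invariant sketch, comments
 *        only):
 * @code
 *      // Before resume: guest = host - off, frozen at uPausedTscValue.
 *      // We compute:    pause = (host_now - off) - uPausedTscValue
 *      // After:         guest = host_now - (off + pause) == uPausedTscValue
 *      // i.e. the guest counter continues exactly where it stopped.
 * @endcode
 */

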
/**
 * Returns features supported by the NEM backend.
 *
 * @returns Flags of features supported by the native NEM backend.
 * @param   pVM The cross context VM structure.
 */
VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
{
    RT_NOREF(pVM);
    /*
     * Apple's Hypervisor.framework is only available on CPUs with nested paging
     * and unrestricted guest execution support, so we can always return these flags here.
     */
    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
}


/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
 *
 * @todo Add notes as the implementation progresses...
 */
