VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@96860

Last change on this file since 96860 was 96821, checked in by vboxsync, 19 months ago

VMM/IEM: Define separate log groups for the VMX and SVM code in IEM since we're more or less out of log levels to use in IEM (and the code isn't following the assignments). Defined Log2 to be for logging vmexits. Needs more cleaning up.

1/* $Id: IEMAll.cpp 96821 2022-09-22 00:35:59Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
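/*
 * Illustrative note: the log groups and levels listed above are normally
 * selected at run time via the IPRT logger environment variables, for
 * instance something along the lines of
 *
 *      VBOX_LOG=+iem.e.l.f.l8
 *      VBOX_LOG_DEST=file=/tmp/iem.log
 *
 * to get error, flow and memory-write logging out of this file in a debug
 * build.  The exact group/flag syntax is defined by the IPRT logger
 * (RTLogGroupSettings), so treat the above as a sketch rather than a
 * reference.
 */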
91
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <VBox/disopcode.h>
130#include <iprt/asm-math.h>
131#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
132# include <iprt/asm-amd64-x86.h>
133#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
134# include <iprt/asm-arm.h>
135#endif
136#include <iprt/assert.h>
137#include <iprt/string.h>
138#include <iprt/x86.h>
139
140#include "IEMInline.h"
141
142
143/*********************************************************************************************************************************
144* Structures and Typedefs *
145*********************************************************************************************************************************/
146/**
147 * CPU exception classes.
148 */
149typedef enum IEMXCPTCLASS
150{
151 IEMXCPTCLASS_BENIGN,
152 IEMXCPTCLASS_CONTRIBUTORY,
153 IEMXCPTCLASS_PAGE_FAULT,
154 IEMXCPTCLASS_DOUBLE_FAULT
155} IEMXCPTCLASS;
156
157
158/*********************************************************************************************************************************
159* Global Variables *
160*********************************************************************************************************************************/
161#if defined(IEM_LOG_MEMORY_WRITES)
162/** What IEM just wrote. */
163uint8_t g_abIemWrote[256];
164/** How much IEM just wrote. */
165size_t g_cbIemWrote;
166#endif
167
168
169/*********************************************************************************************************************************
170* Internal Functions *
171*********************************************************************************************************************************/
172static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
173 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
174
175
176/**
177 * Initializes the decoder state.
178 *
179 * iemReInitDecoder is mostly a copy of this function.
180 *
181 * @param pVCpu The cross context virtual CPU structure of the
182 * calling thread.
183 * @param fBypassHandlers Whether to bypass access handlers.
184 * @param fDisregardLock Whether to disregard the LOCK prefix.
185 */
186DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
187{
188 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
189 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
191 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
192 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
193 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
194 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
196 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
197 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
198
199 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
200 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
201 pVCpu->iem.s.enmCpuMode = enmMode;
202 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
203 pVCpu->iem.s.enmEffAddrMode = enmMode;
204 if (enmMode != IEMMODE_64BIT)
205 {
206 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
207 pVCpu->iem.s.enmEffOpSize = enmMode;
208 }
209 else
210 {
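 /* In 64-bit mode the default operand size is 32 bits (a 66h prefix selects
    16-bit and REX.W 64-bit operands), whereas the default address size stays
    64-bit, hence enmDefAddrMode above keeps the 64-bit mode value. */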
211 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
212 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
213 }
214 pVCpu->iem.s.fPrefixes = 0;
215 pVCpu->iem.s.uRexReg = 0;
216 pVCpu->iem.s.uRexB = 0;
217 pVCpu->iem.s.uRexIndex = 0;
218 pVCpu->iem.s.idxPrefix = 0;
219 pVCpu->iem.s.uVex3rdReg = 0;
220 pVCpu->iem.s.uVexLength = 0;
221 pVCpu->iem.s.fEvexStuff = 0;
222 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
223#ifdef IEM_WITH_CODE_TLB
224 pVCpu->iem.s.pbInstrBuf = NULL;
225 pVCpu->iem.s.offInstrNextByte = 0;
226 pVCpu->iem.s.offCurInstrStart = 0;
227# ifdef VBOX_STRICT
228 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
229 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
230 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
231# endif
232#else
233 pVCpu->iem.s.offOpcode = 0;
234 pVCpu->iem.s.cbOpcode = 0;
235#endif
236 pVCpu->iem.s.offModRm = 0;
237 pVCpu->iem.s.cActiveMappings = 0;
238 pVCpu->iem.s.iNextMapping = 0;
239 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
240 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
241 pVCpu->iem.s.fDisregardLock = fDisregardLock;
242
243#ifdef DBGFTRACE_ENABLED
244 switch (enmMode)
245 {
246 case IEMMODE_64BIT:
247 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
248 break;
249 case IEMMODE_32BIT:
250 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
251 break;
252 case IEMMODE_16BIT:
253 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
254 break;
255 }
256#endif
257}
258
259
260/**
261 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
262 *
263 * This is mostly a copy of iemInitDecoder.
264 *
265 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
266 */
267DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
268{
269 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
270 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
271 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
278
279 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
280 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
281 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
282 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
283 pVCpu->iem.s.enmEffAddrMode = enmMode;
284 if (enmMode != IEMMODE_64BIT)
285 {
286 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffOpSize = enmMode;
288 }
289 else
290 {
291 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
292 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
293 }
294 pVCpu->iem.s.fPrefixes = 0;
295 pVCpu->iem.s.uRexReg = 0;
296 pVCpu->iem.s.uRexB = 0;
297 pVCpu->iem.s.uRexIndex = 0;
298 pVCpu->iem.s.idxPrefix = 0;
299 pVCpu->iem.s.uVex3rdReg = 0;
300 pVCpu->iem.s.uVexLength = 0;
301 pVCpu->iem.s.fEvexStuff = 0;
302 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
303#ifdef IEM_WITH_CODE_TLB
304 if (pVCpu->iem.s.pbInstrBuf)
305 {
306 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
307 - pVCpu->iem.s.uInstrBufPc;
308 if (off < pVCpu->iem.s.cbInstrBufTotal)
309 {
310 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
311 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
312 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
313 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
314 else
315 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
316 }
317 else
318 {
319 pVCpu->iem.s.pbInstrBuf = NULL;
320 pVCpu->iem.s.offInstrNextByte = 0;
321 pVCpu->iem.s.offCurInstrStart = 0;
322 pVCpu->iem.s.cbInstrBuf = 0;
323 pVCpu->iem.s.cbInstrBufTotal = 0;
324 }
325 }
326 else
327 {
328 pVCpu->iem.s.offInstrNextByte = 0;
329 pVCpu->iem.s.offCurInstrStart = 0;
330 pVCpu->iem.s.cbInstrBuf = 0;
331 pVCpu->iem.s.cbInstrBufTotal = 0;
332 }
333#else
334 pVCpu->iem.s.cbOpcode = 0;
335 pVCpu->iem.s.offOpcode = 0;
336#endif
337 pVCpu->iem.s.offModRm = 0;
338 Assert(pVCpu->iem.s.cActiveMappings == 0);
339 pVCpu->iem.s.iNextMapping = 0;
340 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
341 Assert(pVCpu->iem.s.fBypassHandlers == false);
342
343#ifdef DBGFTRACE_ENABLED
344 switch (enmMode)
345 {
346 case IEMMODE_64BIT:
347 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
348 break;
349 case IEMMODE_32BIT:
350 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
351 break;
352 case IEMMODE_16BIT:
353 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
354 break;
355 }
356#endif
357}
358
359
360
361/**
362 * Prefetch opcodes the first time when starting executing.
363 *
364 * @returns Strict VBox status code.
365 * @param pVCpu The cross context virtual CPU structure of the
366 * calling thread.
367 * @param fBypassHandlers Whether to bypass access handlers.
368 * @param fDisregardLock Whether to disregard LOCK prefixes.
369 *
370 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
371 * store them as such.
372 */
373static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
374{
375 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
376
377#ifdef IEM_WITH_CODE_TLB
378 /** @todo Do ITLB lookup here. */
379
380#else /* !IEM_WITH_CODE_TLB */
381
382 /*
383 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
384 *
385 * First translate CS:rIP to a physical address.
386 */
387 uint32_t cbToTryRead;
388 RTGCPTR GCPtrPC;
389 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
390 {
391 cbToTryRead = GUEST_PAGE_SIZE;
392 GCPtrPC = pVCpu->cpum.GstCtx.rip;
393 if (IEM_IS_CANONICAL(GCPtrPC))
394 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
395 else
396 return iemRaiseGeneralProtectionFault0(pVCpu);
397 }
398 else
399 {
400 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
401 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
402 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
403 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
404 else
405 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
406 if (cbToTryRead) { /* likely */ }
407 else /* overflowed */
408 {
409 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
410 cbToTryRead = UINT32_MAX;
411 }
412 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
413 Assert(GCPtrPC <= UINT32_MAX);
414 }
415
416 PGMPTWALK Walk;
417 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
418 if (RT_SUCCESS(rc))
419 Assert(Walk.fSucceeded); /* probable. */
420 else
421 {
422 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
423#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
424 if (Walk.fFailed & PGM_WALKFAIL_EPT)
425 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
426#endif
427 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
428 }
429 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
430 else
431 {
432 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
433#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
434 if (Walk.fFailed & PGM_WALKFAIL_EPT)
435 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
436#endif
437 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
438 }
439 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
440 else
441 {
442 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
443#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
444 if (Walk.fFailed & PGM_WALKFAIL_EPT)
445 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
446#endif
447 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
448 }
449 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
450 /** @todo Check reserved bits and such stuff. PGM is better at doing
451 * that, so do it when implementing the guest virtual address
452 * TLB... */
453
454 /*
455 * Read the bytes at this address.
456 */
457 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
458 if (cbToTryRead > cbLeftOnPage)
459 cbToTryRead = cbLeftOnPage;
460 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
461 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
462
463 if (!pVCpu->iem.s.fBypassHandlers)
464 {
465 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
467 { /* likely */ }
468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
469 {
470 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
471 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
472 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
473 }
474 else
475 {
476 Log((RT_SUCCESS(rcStrict)
477 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
478 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
479 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
480 return rcStrict;
481 }
482 }
483 else
484 {
485 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
486 if (RT_SUCCESS(rc))
487 { /* likely */ }
488 else
489 {
490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
491 GCPtrPC, GCPhys, cbToTryRead, rc));
492 return rc;
493 }
494 }
495 pVCpu->iem.s.cbOpcode = cbToTryRead;
496#endif /* !IEM_WITH_CODE_TLB */
497 return VINF_SUCCESS;
498}
499
500
501/**
502 * Invalidates the IEM TLBs.
503 *
504 * This is called internally as well as by PGM when moving GC mappings.
505 *
507 * @param pVCpu The cross context virtual CPU structure of the calling
508 * thread.
509 */
510VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
511{
512#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
513 Log10(("IEMTlbInvalidateAll\n"));
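 /* The TLB revision is or'ed into each entry's uTag when the entry is created
    and compared against on lookup, so bumping the revision here implicitly
    invalidates every entry.  Only when the revision counter wraps around to
    zero do the tags have to be scrubbed explicitly. */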
514# ifdef IEM_WITH_CODE_TLB
515 pVCpu->iem.s.cbInstrBufTotal = 0;
516 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
517 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
518 { /* very likely */ }
519 else
520 {
521 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
522 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
523 while (i-- > 0)
524 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
525 }
526# endif
527
528# ifdef IEM_WITH_DATA_TLB
529 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
530 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
531 { /* very likely */ }
532 else
533 {
534 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
535 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
536 while (i-- > 0)
537 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
538 }
539# endif
540#else
541 RT_NOREF(pVCpu);
542#endif
543}
544
545
546/**
547 * Invalidates a page in the TLBs.
548 *
549 * @param pVCpu The cross context virtual CPU structure of the calling
550 * thread.
551 * @param GCPtr The address of the page to invalidate
552 * @thread EMT(pVCpu)
553 */
554VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
555{
556#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
557 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
558 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
559 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
560 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
561
562# ifdef IEM_WITH_CODE_TLB
563 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
564 {
565 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
566 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
567 pVCpu->iem.s.cbInstrBufTotal = 0;
568 }
569# endif
570
571# ifdef IEM_WITH_DATA_TLB
572 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
573 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
574# endif
575#else
576 NOREF(pVCpu); NOREF(GCPtr);
577#endif
578}
579
580
581#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
582/**
583 * Invalidates both TLBs the slow way following a physical revision rollover.
584 *
585 * Worker for IEMTlbInvalidateAllPhysical,
586 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
587 * iemMemMapJmp and others.
588 *
589 * @thread EMT(pVCpu)
590 */
591static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
592{
593 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
594 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
595 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
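 /* Restart both physical revisions at a small non-zero value and drop each
    entry's ring-3 mapping and physical page flags, so that the next use of an
    entry has to re-query PGM for the page's current state. */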
596
597 unsigned i;
598# ifdef IEM_WITH_CODE_TLB
599 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
600 while (i-- > 0)
601 {
602 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
603 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
604 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
605 }
606# endif
607# ifdef IEM_WITH_DATA_TLB
608 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
609 while (i-- > 0)
610 {
611 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
612 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
613 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
614 }
615# endif
616
617}
618#endif
619
620
621/**
622 * Invalidates the host physical aspects of the IEM TLBs.
623 *
624 * This is called internally as well as by PGM when moving GC mappings.
625 *
626 * @param pVCpu The cross context virtual CPU structure of the calling
627 * thread.
628 * @note Currently not used.
629 */
630VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
631{
632#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
633 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
634 Log10(("IEMTlbInvalidateAllPhysical\n"));
635
636# ifdef IEM_WITH_CODE_TLB
637 pVCpu->iem.s.cbInstrBufTotal = 0;
638# endif
639 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
640 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
641 {
642 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
643 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
644 }
645 else
646 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
647#else
648 NOREF(pVCpu);
649#endif
650}
651
652
653/**
654 * Invalidates the host physical aspects of the IEM TLBs.
655 *
656 * This is called internally as well as by PGM when moving GC mappings.
657 *
658 * @param pVM The cross context VM structure.
659 * @param idCpuCaller The ID of the calling EMT if available to the caller,
660 * otherwise NIL_VMCPUID.
661 *
662 * @remarks Caller holds the PGM lock.
663 */
664VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
665{
666#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
667 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
668 if (pVCpuCaller)
669 VMCPU_ASSERT_EMT(pVCpuCaller);
670 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
671
672 VMCC_FOR_EACH_VMCPU(pVM)
673 {
674# ifdef IEM_WITH_CODE_TLB
675 if (pVCpuCaller == pVCpu)
676 pVCpu->iem.s.cbInstrBufTotal = 0;
677# endif
678
679 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
680 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
681 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
682 { /* likely */}
683 else if (pVCpuCaller == pVCpu)
684 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
685 else
686 {
687 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
688 continue;
689 }
690 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
691 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
692 }
693 VMCC_FOR_EACH_VMCPU_END(pVM);
694
695#else
696 RT_NOREF(pVM, idCpuCaller);
697#endif
698}
699
700#ifdef IEM_WITH_CODE_TLB
701
702/**
703 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
704 * failure and jumping (longjmp).
705 *
706 * We end up here for a number of reasons:
707 * - pbInstrBuf isn't yet initialized.
708 * - Advancing beyond the buffer boundary (e.g. cross page).
709 * - Advancing beyond the CS segment limit.
710 * - Fetching from non-mappable page (e.g. MMIO).
711 *
712 * @param pVCpu The cross context virtual CPU structure of the
713 * calling thread.
714 * @param pvDst Where to return the bytes.
715 * @param cbDst Number of bytes to read.
716 *
717 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
718 */
719void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
720{
721#ifdef IN_RING3
722 for (;;)
723 {
724 Assert(cbDst <= 8);
725 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
726
727 /*
728 * We might have a partial buffer match, deal with that first to make the
729 * rest simpler. This is the first part of the cross page/buffer case.
730 */
731 if (pVCpu->iem.s.pbInstrBuf != NULL)
732 {
733 if (offBuf < pVCpu->iem.s.cbInstrBuf)
734 {
735 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
736 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
737 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
738
739 cbDst -= cbCopy;
740 pvDst = (uint8_t *)pvDst + cbCopy;
741 offBuf += cbCopy;
742 pVCpu->iem.s.offInstrNextByte += offBuf;
743 }
744 }
745
746 /*
747 * Check segment limit, figuring how much we're allowed to access at this point.
748 *
749 * We will fault immediately if RIP is past the segment limit / in non-canonical
750 * territory. If we do continue, there are one or more bytes to read before we
751 * end up in trouble and we need to do that first before faulting.
752 */
753 RTGCPTR GCPtrFirst;
754 uint32_t cbMaxRead;
755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
756 {
757 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
758 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
759 { /* likely */ }
760 else
761 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
762 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
763 }
764 else
765 {
766 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
767 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
768 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
769 { /* likely */ }
770 else
771 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
772 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
773 if (cbMaxRead != 0)
774 { /* likely */ }
775 else
776 {
777 /* Overflowed because address is 0 and limit is max. */
778 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
779 cbMaxRead = X86_PAGE_SIZE;
780 }
781 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
782 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
783 if (cbMaxRead2 < cbMaxRead)
784 cbMaxRead = cbMaxRead2;
785 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
786 }
787
788 /*
789 * Get the TLB entry for this piece of code.
790 */
791 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
792 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
793 if (pTlbe->uTag == uTag)
794 {
795 /* likely when executing lots of code, otherwise unlikely */
796# ifdef VBOX_WITH_STATISTICS
797 pVCpu->iem.s.CodeTlb.cTlbHits++;
798# endif
799 }
800 else
801 {
802 pVCpu->iem.s.CodeTlb.cTlbMisses++;
803 PGMPTWALK Walk;
804 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
805 if (RT_FAILURE(rc))
806 {
807#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
808 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
809 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
810#endif
811 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
812 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
813 }
814
815 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
816 Assert(Walk.fSucceeded);
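 /* The page-table permission bits are stored inverted (NO_USER, NO_WRITE,
    NO_DIRTY, NO_ACCESSED) so that an all-zero flag field means "no
    restrictions", while the NX bit is shifted down to bit 0 (see the
    AssertCompile above).  The physical revision and PG_* bits are filled in
    lazily by the "physical page info" step further down. */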
817 pTlbe->uTag = uTag;
818 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
819 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
820 pTlbe->GCPhys = Walk.GCPhys;
821 pTlbe->pbMappingR3 = NULL;
822 }
823
824 /*
825 * Check TLB page table level access flags.
826 */
827 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
828 {
829 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
830 {
831 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
832 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
833 }
834 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
835 {
836 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
837 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
838 }
839 }
840
841 /*
842 * Look up the physical page info if necessary.
843 */
844 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
845 { /* not necessary */ }
846 else
847 {
848 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
849 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
850 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
851 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
852 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
853 { /* likely */ }
854 else
855 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
856 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
857 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
858 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
859 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
860 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
861 }
862
863# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
864 /*
865 * Try do a direct read using the pbMappingR3 pointer.
866 */
867 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
868 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
869 {
870 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
871 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
872 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
873 {
874 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
875 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
876 }
877 else
878 {
879 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
880 Assert(cbInstr < cbMaxRead);
881 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
882 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
883 }
884 if (cbDst <= cbMaxRead)
885 {
886 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
887 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
888 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
889 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
890 return;
891 }
892 pVCpu->iem.s.pbInstrBuf = NULL;
893
894 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
895 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
896 }
897 else
898# endif
899#if 0
900 /*
901 * If there is no special read handling we can read a bit more and
902 * put it in the prefetch buffer.
903 */
904 if ( cbDst < cbMaxRead
905 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
906 {
907 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
908 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
909 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
910 { /* likely */ }
911 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
912 {
913 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
914 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
915 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
916 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
917 }
918 else
919 {
920 Log((RT_SUCCESS(rcStrict)
921 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
922 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
923 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
924 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
925 }
926 }
927 /*
928 * Special read handling, so only read exactly what's needed.
929 * This is a highly unlikely scenario.
930 */
931 else
932#endif
933 {
934 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
935 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
936 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
937 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
938 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
939 { /* likely */ }
940 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
941 {
942 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
943 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
944 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
945 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
946 }
947 else
948 {
949 Log((RT_SUCCESS(rcStrict)
950 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
951 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
952 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
953 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
954 }
955 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
956 if (cbToRead == cbDst)
957 return;
958 }
959
960 /*
961 * More to read, loop.
962 */
963 cbDst -= cbMaxRead;
964 pvDst = (uint8_t *)pvDst + cbMaxRead;
965 }
966#else
967 RT_NOREF(pvDst, cbDst);
968 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
969#endif
970}
971
972#else
973
974/**
975 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
976 * exception if it fails.
977 *
978 * @returns Strict VBox status code.
979 * @param pVCpu The cross context virtual CPU structure of the
980 * calling thread.
981 * @param cbMin The minimum number of bytes relative to offOpcode
982 * that must be read.
983 */
984VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
985{
986 /*
987 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
988 *
989 * First translate CS:rIP to a physical address.
990 */
991 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
992 uint32_t cbToTryRead;
993 RTGCPTR GCPtrNext;
994 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
995 {
996 cbToTryRead = GUEST_PAGE_SIZE;
997 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
998 if (!IEM_IS_CANONICAL(GCPtrNext))
999 return iemRaiseGeneralProtectionFault0(pVCpu);
1000 }
1001 else
1002 {
1003 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1004 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1005 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1006 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1007 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1008 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1009 if (!cbToTryRead) /* overflowed */
1010 {
1011 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1012 cbToTryRead = UINT32_MAX;
1013 /** @todo check out wrapping around the code segment. */
1014 }
1015 if (cbToTryRead < cbMin - cbLeft)
1016 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1017 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1018 }
1019
1020 /* Only read up to the end of the page, and make sure we don't read more
1021 than the opcode buffer can hold. */
1022 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1023 if (cbToTryRead > cbLeftOnPage)
1024 cbToTryRead = cbLeftOnPage;
1025 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1026 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1027/** @todo r=bird: Convert assertion into undefined opcode exception? */
1028 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1029
1030 PGMPTWALK Walk;
1031 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1032 if (RT_FAILURE(rc))
1033 {
1034 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1035#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1036 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1037 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1038#endif
1039 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1040 }
1041 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1042 {
1043 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1044#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1045 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1046 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1047#endif
1048 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1049 }
1050 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1051 {
1052 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1053#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1054 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1055 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1056#endif
1057 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1058 }
1059 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1060 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1061 /** @todo Check reserved bits and such stuff. PGM is better at doing
1062 * that, so do it when implementing the guest virtual address
1063 * TLB... */
1064
1065 /*
1066 * Read the bytes at this address.
1067 *
1068 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1069 * and since PATM should only patch the start of an instruction there
1070 * should be no need to check again here.
1071 */
1072 if (!pVCpu->iem.s.fBypassHandlers)
1073 {
1074 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1075 cbToTryRead, PGMACCESSORIGIN_IEM);
1076 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1077 { /* likely */ }
1078 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1079 {
1080 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1081 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1082 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1083 }
1084 else
1085 {
1086 Log((RT_SUCCESS(rcStrict)
1087 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1088 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1089 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1090 return rcStrict;
1091 }
1092 }
1093 else
1094 {
1095 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1096 if (RT_SUCCESS(rc))
1097 { /* likely */ }
1098 else
1099 {
1100 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1101 return rc;
1102 }
1103 }
1104 pVCpu->iem.s.cbOpcode += cbToTryRead;
1105 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1106
1107 return VINF_SUCCESS;
1108}
1109
1110#endif /* !IEM_WITH_CODE_TLB */
1111#ifndef IEM_WITH_SETJMP
1112
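/*
 * The inline fast-path opcode getters (iemOpcodeGetNextU8 and friends) fall
 * back to the *Slow / *SlowJmp workers below only when the bytes buffered in
 * abOpcode / pbInstrBuf do not cover the request or some other problematic
 * case is hit.
 */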
1113/**
1114 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1115 *
1116 * @returns Strict VBox status code.
1117 * @param pVCpu The cross context virtual CPU structure of the
1118 * calling thread.
1119 * @param pb Where to return the opcode byte.
1120 */
1121VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1122{
1123 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1124 if (rcStrict == VINF_SUCCESS)
1125 {
1126 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1127 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1128 pVCpu->iem.s.offOpcode = offOpcode + 1;
1129 }
1130 else
1131 *pb = 0;
1132 return rcStrict;
1133}
1134
1135#else /* IEM_WITH_SETJMP */
1136
1137/**
1138 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1139 *
1140 * @returns The opcode byte.
1141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1142 */
1143uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1144{
1145# ifdef IEM_WITH_CODE_TLB
1146 uint8_t u8;
1147 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1148 return u8;
1149# else
1150 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1151 if (rcStrict == VINF_SUCCESS)
1152 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1153 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1154# endif
1155}
1156
1157#endif /* IEM_WITH_SETJMP */
1158
1159#ifndef IEM_WITH_SETJMP
1160
1161/**
1162 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1163 *
1164 * @returns Strict VBox status code.
1165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1166 * @param pu16 Where to return the opcode word (sign-extended byte).
1167 */
1168VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1169{
1170 uint8_t u8;
1171 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1172 if (rcStrict == VINF_SUCCESS)
1173 *pu16 = (int8_t)u8;
1174 return rcStrict;
1175}
1176
1177
1178/**
1179 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1180 *
1181 * @returns Strict VBox status code.
1182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1183 * @param pu32 Where to return the opcode dword.
1184 */
1185VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1186{
1187 uint8_t u8;
1188 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1189 if (rcStrict == VINF_SUCCESS)
1190 *pu32 = (int8_t)u8;
1191 return rcStrict;
1192}
1193
1194
1195/**
1196 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1197 *
1198 * @returns Strict VBox status code.
1199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1200 * @param pu64 Where to return the opcode qword.
1201 */
1202VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1203{
1204 uint8_t u8;
1205 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1206 if (rcStrict == VINF_SUCCESS)
1207 *pu64 = (int8_t)u8;
1208 return rcStrict;
1209}
1210
1211#endif /* !IEM_WITH_SETJMP */
1212
1213
1214#ifndef IEM_WITH_SETJMP
1215
1216/**
1217 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1218 *
1219 * @returns Strict VBox status code.
1220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1221 * @param pu16 Where to return the opcode word.
1222 */
1223VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1224{
1225 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1226 if (rcStrict == VINF_SUCCESS)
1227 {
1228 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1229# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1230 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1231# else
1232 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1233# endif
1234 pVCpu->iem.s.offOpcode = offOpcode + 2;
1235 }
1236 else
1237 *pu16 = 0;
1238 return rcStrict;
1239}
1240
1241#else /* IEM_WITH_SETJMP */
1242
1243/**
1244 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1245 *
1246 * @returns The opcode word.
1247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1248 */
1249uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1250{
1251# ifdef IEM_WITH_CODE_TLB
1252 uint16_t u16;
1253 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1254 return u16;
1255# else
1256 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1257 if (rcStrict == VINF_SUCCESS)
1258 {
1259 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1260 pVCpu->iem.s.offOpcode += 2;
1261# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1262 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1263# else
1264 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1265# endif
1266 }
1267 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1268# endif
1269}
1270
1271#endif /* IEM_WITH_SETJMP */
1272
1273#ifndef IEM_WITH_SETJMP
1274
1275/**
1276 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1277 *
1278 * @returns Strict VBox status code.
1279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1280 * @param pu32 Where to return the opcode double word.
1281 */
1282VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1283{
1284 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1285 if (rcStrict == VINF_SUCCESS)
1286 {
1287 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1288 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1289 pVCpu->iem.s.offOpcode = offOpcode + 2;
1290 }
1291 else
1292 *pu32 = 0;
1293 return rcStrict;
1294}
1295
1296
1297/**
1298 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1299 *
1300 * @returns Strict VBox status code.
1301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1302 * @param pu64 Where to return the opcode quad word.
1303 */
1304VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1305{
1306 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1307 if (rcStrict == VINF_SUCCESS)
1308 {
1309 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1310 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1311 pVCpu->iem.s.offOpcode = offOpcode + 2;
1312 }
1313 else
1314 *pu64 = 0;
1315 return rcStrict;
1316}
1317
1318#endif /* !IEM_WITH_SETJMP */
1319
1320#ifndef IEM_WITH_SETJMP
1321
1322/**
1323 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1324 *
1325 * @returns Strict VBox status code.
1326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1327 * @param pu32 Where to return the opcode dword.
1328 */
1329VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1330{
1331 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1332 if (rcStrict == VINF_SUCCESS)
1333 {
1334 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1335# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1336 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1337# else
1338 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1339 pVCpu->iem.s.abOpcode[offOpcode + 1],
1340 pVCpu->iem.s.abOpcode[offOpcode + 2],
1341 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1342# endif
1343 pVCpu->iem.s.offOpcode = offOpcode + 4;
1344 }
1345 else
1346 *pu32 = 0;
1347 return rcStrict;
1348}
1349
1350#else /* IEM_WITH_SETJMP */
1351
1352/**
1353 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1354 *
1355 * @returns The opcode dword.
1356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1357 */
1358uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1359{
1360# ifdef IEM_WITH_CODE_TLB
1361 uint32_t u32;
1362 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1363 return u32;
1364# else
1365 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1366 if (rcStrict == VINF_SUCCESS)
1367 {
1368 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1369 pVCpu->iem.s.offOpcode = offOpcode + 4;
1370# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1371 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1372# else
1373 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1374 pVCpu->iem.s.abOpcode[offOpcode + 1],
1375 pVCpu->iem.s.abOpcode[offOpcode + 2],
1376 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1377# endif
1378 }
1379 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1380# endif
1381}
1382
1383#endif /* IEM_WITH_SETJMP */
1384
1385#ifndef IEM_WITH_SETJMP
1386
1387/**
1388 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1389 *
1390 * @returns Strict VBox status code.
1391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1392 * @param pu64 Where to return the opcode dword.
1393 */
1394VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1395{
1396 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1397 if (rcStrict == VINF_SUCCESS)
1398 {
1399 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1400 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1401 pVCpu->iem.s.abOpcode[offOpcode + 1],
1402 pVCpu->iem.s.abOpcode[offOpcode + 2],
1403 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1404 pVCpu->iem.s.offOpcode = offOpcode + 4;
1405 }
1406 else
1407 *pu64 = 0;
1408 return rcStrict;
1409}
1410
1411
1412/**
1413 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1414 *
1415 * @returns Strict VBox status code.
1416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1417 * @param pu64 Where to return the opcode qword.
1418 */
1419VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1420{
1421 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1422 if (rcStrict == VINF_SUCCESS)
1423 {
1424 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1425 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1426 pVCpu->iem.s.abOpcode[offOpcode + 1],
1427 pVCpu->iem.s.abOpcode[offOpcode + 2],
1428 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1429 pVCpu->iem.s.offOpcode = offOpcode + 4;
1430 }
1431 else
1432 *pu64 = 0;
1433 return rcStrict;
1434}
1435
1436#endif /* !IEM_WITH_SETJMP */
1437
1438#ifndef IEM_WITH_SETJMP
1439
1440/**
1441 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1442 *
1443 * @returns Strict VBox status code.
1444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1445 * @param pu64 Where to return the opcode qword.
1446 */
1447VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1448{
1449 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1450 if (rcStrict == VINF_SUCCESS)
1451 {
1452 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1453# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1454 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1455# else
1456 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1457 pVCpu->iem.s.abOpcode[offOpcode + 1],
1458 pVCpu->iem.s.abOpcode[offOpcode + 2],
1459 pVCpu->iem.s.abOpcode[offOpcode + 3],
1460 pVCpu->iem.s.abOpcode[offOpcode + 4],
1461 pVCpu->iem.s.abOpcode[offOpcode + 5],
1462 pVCpu->iem.s.abOpcode[offOpcode + 6],
1463 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1464# endif
1465 pVCpu->iem.s.offOpcode = offOpcode + 8;
1466 }
1467 else
1468 *pu64 = 0;
1469 return rcStrict;
1470}
1471
1472#else /* IEM_WITH_SETJMP */
1473
1474/**
1475 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1476 *
1477 * @returns The opcode qword.
1478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1479 */
1480uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1481{
1482# ifdef IEM_WITH_CODE_TLB
1483 uint64_t u64;
1484 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1485 return u64;
1486# else
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491 pVCpu->iem.s.offOpcode = offOpcode + 8;
1492# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1493 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1494# else
1495 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1496 pVCpu->iem.s.abOpcode[offOpcode + 1],
1497 pVCpu->iem.s.abOpcode[offOpcode + 2],
1498 pVCpu->iem.s.abOpcode[offOpcode + 3],
1499 pVCpu->iem.s.abOpcode[offOpcode + 4],
1500 pVCpu->iem.s.abOpcode[offOpcode + 5],
1501 pVCpu->iem.s.abOpcode[offOpcode + 6],
1502 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1503# endif
1504 }
1505 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1506# endif
1507}
1508
1509#endif /* IEM_WITH_SETJMP */
1510
1511
1512
1513/** @name Misc Worker Functions.
1514 * @{
1515 */
1516
1517/**
1518 * Gets the exception class for the specified exception vector.
1519 *
1520 * @returns The class of the specified exception.
1521 * @param uVector The exception vector.
1522 */
1523static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1524{
1525 Assert(uVector <= X86_XCPT_LAST);
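 /* The benign / contributory / page-fault classification below follows the
    double-fault rules in the Intel SDM and AMD APM; X86_XCPT_SX and
    X86_XCPT_VE are vendor-specific additions. */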
1526 switch (uVector)
1527 {
1528 case X86_XCPT_DE:
1529 case X86_XCPT_TS:
1530 case X86_XCPT_NP:
1531 case X86_XCPT_SS:
1532 case X86_XCPT_GP:
1533 case X86_XCPT_SX: /* AMD only */
1534 return IEMXCPTCLASS_CONTRIBUTORY;
1535
1536 case X86_XCPT_PF:
1537 case X86_XCPT_VE: /* Intel only */
1538 return IEMXCPTCLASS_PAGE_FAULT;
1539
1540 case X86_XCPT_DF:
1541 return IEMXCPTCLASS_DOUBLE_FAULT;
1542 }
1543 return IEMXCPTCLASS_BENIGN;
1544}
1545
1546
1547/**
1548 * Evaluates how to handle an exception caused during delivery of another event
1549 * (exception / interrupt).
1550 *
1551 * @returns How to handle the recursive exception.
1552 * @param pVCpu The cross context virtual CPU structure of the
1553 * calling thread.
1554 * @param fPrevFlags The flags of the previous event.
1555 * @param uPrevVector The vector of the previous event.
1556 * @param fCurFlags The flags of the current exception.
1557 * @param uCurVector The vector of the current exception.
1558 * @param pfXcptRaiseInfo Where to store additional information about the
1559 * exception condition. Optional.
1560 */
1561VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1562 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1563{
1564 /*
1565     * Only CPU exceptions can be raised while delivering other events; software interrupt
1566     * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1567 */
1568 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1569 Assert(pVCpu); RT_NOREF(pVCpu);
1570 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1571
1572 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1573 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1574 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1575 {
1576 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1577 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1578 {
1579 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1580 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1581 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1582 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1583 {
1584 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1585 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1586 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1587 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1588 uCurVector, pVCpu->cpum.GstCtx.cr2));
1589 }
1590 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1591 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1592 {
1593 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1594 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1595 }
1596 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1597 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1598 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1599 {
1600 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1601 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1602 }
1603 }
1604 else
1605 {
1606 if (uPrevVector == X86_XCPT_NMI)
1607 {
1608 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1609 if (uCurVector == X86_XCPT_PF)
1610 {
1611 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1612 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1613 }
1614 }
1615 else if ( uPrevVector == X86_XCPT_AC
1616 && uCurVector == X86_XCPT_AC)
1617 {
1618 enmRaise = IEMXCPTRAISE_CPU_HANG;
1619 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1620 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1621 }
1622 }
1623 }
1624 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1625 {
1626 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1627 if (uCurVector == X86_XCPT_PF)
1628 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1629 }
1630 else
1631 {
1632 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1633 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1634 }
1635
1636 if (pfXcptRaiseInfo)
1637 *pfXcptRaiseInfo = fRaiseInfo;
1638 return enmRaise;
1639}
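
/*
 * Illustrative usage sketch (editorial; not one of the real callers in this file). It shows how
 * the classification is consumed: a page fault hit while delivering a #GP stays with the current
 * exception, whereas a #GP hit while delivering a page fault escalates to a double fault. Only
 * identifiers already used above appear here; a valid pVCpu is assumed from the surrounding context.
 */
#if 0 /* example only */
    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF, /* previous event */
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, /* current exception */
                                                           &fRaiseInfo);
    /* enmRaise is IEMXCPTRAISE_DOUBLE_FAULT and fRaiseInfo contains IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT. */
#endif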
1640
1641
1642/**
1643 * Enters the CPU shutdown state initiated by a triple fault or other
1644 * unrecoverable conditions.
1645 *
1646 * @returns Strict VBox status code.
1647 * @param pVCpu The cross context virtual CPU structure of the
1648 * calling thread.
1649 */
1650static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1651{
1652 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1653 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1654
1655 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1656 {
1657 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1658 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1659 }
1660
1661 RT_NOREF(pVCpu);
1662 return VINF_EM_TRIPLE_FAULT;
1663}
1664
1665
1666/**
1667 * Validates a new SS segment.
1668 *
1669 * @returns VBox strict status code.
1670 * @param pVCpu The cross context virtual CPU structure of the
1671 * calling thread.
1672 * @param NewSS The new SS selector.
1673 * @param uCpl The CPL to load the stack for.
1674 * @param pDesc Where to return the descriptor.
1675 */
1676static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1677{
1678 /* Null selectors are not allowed (we're not called for dispatching
1679 interrupts with SS=0 in long mode). */
1680 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1681 {
1682 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1683 return iemRaiseTaskSwitchFault0(pVCpu);
1684 }
1685
1686 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1687 if ((NewSS & X86_SEL_RPL) != uCpl)
1688 {
1689 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1690 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1691 }
1692
1693 /*
1694 * Read the descriptor.
1695 */
1696 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1697 if (rcStrict != VINF_SUCCESS)
1698 return rcStrict;
1699
1700 /*
1701 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1702 */
1703 if (!pDesc->Legacy.Gen.u1DescType)
1704 {
1705 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1706 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1707 }
1708
1709 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1710 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1711 {
1712 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1713 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1714 }
1715 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1716 {
1717 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1718 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1719 }
1720
1721 /* Is it there? */
1722 /** @todo testcase: Is this checked before the canonical / limit check below? */
1723 if (!pDesc->Legacy.Gen.u1Present)
1724 {
1725 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1726 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1727 }
1728
1729 return VINF_SUCCESS;
1730}
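
/*
 * Illustrative sketch (editorial; not one of the real callers). A MOV SS / LSS style path could
 * validate the incoming selector like this before committing it; uNewSS is a hypothetical local
 * holding the selector operand, everything else exists above.
 */
#if 0 /* example only */
    IEMSELDESC   DescSS;
    VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, uNewSS, pVCpu->iem.s.uCpl, &DescSS);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict; /* #TS/#NP has already been raised with the proper selector error code. */
    /* DescSS now describes a present, writable data segment whose DPL equals the CPL. */
#endif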
1731
1732/** @} */
1733
1734
1735/** @name Raising Exceptions.
1736 *
1737 * @{
1738 */
1739
1740
1741/**
1742 * Loads the specified stack far pointer from the TSS.
1743 *
1744 * @returns VBox strict status code.
1745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1746 * @param uCpl The CPL to load the stack for.
1747 * @param pSelSS Where to return the new stack segment.
1748 * @param puEsp Where to return the new stack pointer.
1749 */
1750static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1751{
1752 VBOXSTRICTRC rcStrict;
1753 Assert(uCpl < 4);
1754
1755 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1756 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1757 {
1758 /*
1759 * 16-bit TSS (X86TSS16).
1760 */
1761 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1762 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1763 {
1764 uint32_t off = uCpl * 4 + 2;
1765 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1766 {
1767 /** @todo check actual access pattern here. */
1768 uint32_t u32Tmp = 0; /* gcc maybe... */
1769 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1770 if (rcStrict == VINF_SUCCESS)
1771 {
1772 *puEsp = RT_LOWORD(u32Tmp);
1773 *pSelSS = RT_HIWORD(u32Tmp);
1774 return VINF_SUCCESS;
1775 }
1776 }
1777 else
1778 {
1779 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1780 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1781 }
1782 break;
1783 }
1784
1785 /*
1786 * 32-bit TSS (X86TSS32).
1787 */
1788 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1789 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1790 {
1791 uint32_t off = uCpl * 8 + 4;
1792 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1793 {
1794/** @todo check actual access pattern here. */
1795 uint64_t u64Tmp;
1796 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1797 if (rcStrict == VINF_SUCCESS)
1798 {
1799 *puEsp = u64Tmp & UINT32_MAX;
1800 *pSelSS = (RTSEL)(u64Tmp >> 32);
1801 return VINF_SUCCESS;
1802 }
1803 }
1804 else
1805 {
1806                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1807 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1808 }
1809 break;
1810 }
1811
1812 default:
1813 AssertFailed();
1814 rcStrict = VERR_IEM_IPE_4;
1815 break;
1816 }
1817
1818 *puEsp = 0; /* make gcc happy */
1819 *pSelSS = 0; /* make gcc happy */
1820 return rcStrict;
1821}
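
/*
 * Worked example (editorial), following the offsets used above: for uCpl=1 a 16-bit TSS supplies
 * SS1:SP1 from the dword at offset 1*4 + 2 = 6, while a 32-bit TSS supplies SS1:ESP1 from the
 * qword at offset 1*8 + 4 = 12 (ESP1 in the low dword, SS1 in the low word of the high dword).
 */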
1822
1823
1824/**
1825 * Loads the specified stack pointer from the 64-bit TSS.
1826 *
1827 * @returns VBox strict status code.
1828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1829 * @param uCpl The CPL to load the stack for.
1830 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1831 * @param puRsp Where to return the new stack pointer.
1832 */
1833static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1834{
1835 Assert(uCpl < 4);
1836 Assert(uIst < 8);
1837 *puRsp = 0; /* make gcc happy */
1838
1839 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1840 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1841
1842 uint32_t off;
1843 if (uIst)
1844 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1845 else
1846 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1847 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1848 {
1849 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1850 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1851 }
1852
1853 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1854}
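
/*
 * Worked example (editorial): with uIst=0 and uCpl=2 the qword is fetched from
 * RT_UOFFSETOF(X86TSS64, rsp0) + 2*8, i.e. the RSP2 field; with uIst=3 it is fetched from
 * RT_UOFFSETOF(X86TSS64, ist1) + 2*8, i.e. the IST3 field, regardless of uCpl.
 */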
1855
1856
1857/**
1858 * Adjust the CPU state according to the exception being raised.
1859 *
1860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1861 * @param u8Vector The exception that has been raised.
1862 */
1863DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1864{
1865 switch (u8Vector)
1866 {
1867 case X86_XCPT_DB:
1868 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1869 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1870 break;
1871 /** @todo Read the AMD and Intel exception reference... */
1872 }
1873}
1874
1875
1876/**
1877 * Implements exceptions and interrupts for real mode.
1878 *
1879 * @returns VBox strict status code.
1880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1881 * @param cbInstr The number of bytes to offset rIP by in the return
1882 * address.
1883 * @param u8Vector The interrupt / exception vector number.
1884 * @param fFlags The flags.
1885 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1886 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1887 */
1888static VBOXSTRICTRC
1889iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1890 uint8_t cbInstr,
1891 uint8_t u8Vector,
1892 uint32_t fFlags,
1893 uint16_t uErr,
1894 uint64_t uCr2) RT_NOEXCEPT
1895{
1896 NOREF(uErr); NOREF(uCr2);
1897 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1898
1899 /*
1900 * Read the IDT entry.
1901 */
1902 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1903 {
1904 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1905 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1906 }
1907 RTFAR16 Idte;
1908 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1909 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1910 {
1911 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1912 return rcStrict;
1913 }
1914
1915 /*
1916 * Push the stack frame.
1917 */
1918 uint16_t *pu16Frame;
1919 uint64_t uNewRsp;
1920 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
1921 if (rcStrict != VINF_SUCCESS)
1922 return rcStrict;
1923
1924 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1925#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1926 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1927 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1928 fEfl |= UINT16_C(0xf000);
1929#endif
1930 pu16Frame[2] = (uint16_t)fEfl;
1931 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1932 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1933 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1934 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1935 return rcStrict;
1936
1937 /*
1938 * Load the vector address into cs:ip and make exception specific state
1939 * adjustments.
1940 */
1941 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1942 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1943 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1944 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1945 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1946 pVCpu->cpum.GstCtx.rip = Idte.off;
1947 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1948 IEMMISC_SET_EFL(pVCpu, fEfl);
1949
1950 /** @todo do we actually do this in real mode? */
1951 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1952 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1953
1954 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1955}
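
/*
 * Worked example (editorial), matching the code above: dispatching INT 21h in real mode fetches
 * the 4-byte IVT entry at IDTR.base + 0x21*4 (offset in the low word, segment in the high word),
 * pushes FLAGS, CS and the return IP, clears IF/TF/AC, and resumes at the IVT segment:offset.
 */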
1956
1957
1958/**
1959 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1960 *
1961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1962 * @param pSReg Pointer to the segment register.
1963 */
1964DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1965{
1966 pSReg->Sel = 0;
1967 pSReg->ValidSel = 0;
1968 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1969 {
1970 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
1971 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1972 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1973 }
1974 else
1975 {
1976 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1977 /** @todo check this on AMD-V */
1978 pSReg->u64Base = 0;
1979 pSReg->u32Limit = 0;
1980 }
1981}
1982
1983
1984/**
1985 * Loads a segment selector during a task switch in V8086 mode.
1986 *
1987 * @param pSReg Pointer to the segment register.
1988 * @param uSel The selector value to load.
1989 */
1990DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1991{
1992 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1993 pSReg->Sel = uSel;
1994 pSReg->ValidSel = uSel;
1995 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1996 pSReg->u64Base = uSel << 4;
1997 pSReg->u32Limit = 0xffff;
1998 pSReg->Attr.u = 0xf3;
1999}
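
/*
 * Worked example (editorial): loading uSel=0x1234 here yields base 0x12340 (uSel << 4),
 * limit 0xffff and attributes 0xf3 (present, DPL=3, accessed read/write data), which is the
 * fixed shape of a V8086 mode segment.
 */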
2000
2001
2002/**
2003 * Loads a segment selector during a task switch in protected mode.
2004 *
2005 * In this task switch scenario, we would throw \#TS exceptions rather than
2006 * \#GPs.
2007 *
2008 * @returns VBox strict status code.
2009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2010 * @param pSReg Pointer to the segment register.
2011 * @param uSel The new selector value.
2012 *
2013 * @remarks This does _not_ handle CS or SS.
2014 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2015 */
2016static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2017{
2018 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2019
2020 /* Null data selector. */
2021 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2022 {
2023 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2025 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2026 return VINF_SUCCESS;
2027 }
2028
2029 /* Fetch the descriptor. */
2030 IEMSELDESC Desc;
2031 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2032 if (rcStrict != VINF_SUCCESS)
2033 {
2034 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2035 VBOXSTRICTRC_VAL(rcStrict)));
2036 return rcStrict;
2037 }
2038
2039 /* Must be a data segment or readable code segment. */
2040 if ( !Desc.Legacy.Gen.u1DescType
2041 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2042 {
2043 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2044 Desc.Legacy.Gen.u4Type));
2045 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2046 }
2047
2048 /* Check privileges for data segments and non-conforming code segments. */
2049 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2050 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2051 {
2052 /* The RPL and the new CPL must be less than or equal to the DPL. */
2053 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2054 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2055 {
2056 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2057 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2058 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2059 }
2060 }
2061
2062 /* Is it there? */
2063 if (!Desc.Legacy.Gen.u1Present)
2064 {
2065 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2066 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2067 }
2068
2069 /* The base and limit. */
2070 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2071 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2072
2073 /*
2074 * Ok, everything checked out fine. Now set the accessed bit before
2075 * committing the result into the registers.
2076 */
2077 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2078 {
2079 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2080 if (rcStrict != VINF_SUCCESS)
2081 return rcStrict;
2082 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2083 }
2084
2085 /* Commit */
2086 pSReg->Sel = uSel;
2087 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2088 pSReg->u32Limit = cbLimit;
2089 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2090 pSReg->ValidSel = uSel;
2091 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2092 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2093 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2094
2095 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2096 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2097 return VINF_SUCCESS;
2098}
2099
2100
2101/**
2102 * Performs a task switch.
2103 *
2104 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2105 * caller is responsible for performing the necessary checks (like DPL, TSS
2106 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2107 * reference for JMP, CALL, IRET.
2108 *
2109 * If the task switch is due to a software interrupt or hardware exception,
2110 * the caller is responsible for validating the TSS selector and descriptor. See
2111 * Intel Instruction reference for INT n.
2112 *
2113 * @returns VBox strict status code.
2114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2115 * @param enmTaskSwitch The cause of the task switch.
2116 * @param uNextEip The EIP effective after the task switch.
2117 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2118 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2119 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2120 * @param SelTSS The TSS selector of the new task.
2121 * @param pNewDescTSS Pointer to the new TSS descriptor.
2122 */
2123VBOXSTRICTRC
2124iemTaskSwitch(PVMCPUCC pVCpu,
2125 IEMTASKSWITCH enmTaskSwitch,
2126 uint32_t uNextEip,
2127 uint32_t fFlags,
2128 uint16_t uErr,
2129 uint64_t uCr2,
2130 RTSEL SelTSS,
2131 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2132{
2133 Assert(!IEM_IS_REAL_MODE(pVCpu));
2134 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2135 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2136
2137 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2138 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2139 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2140 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2141 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2142
2143 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2144 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2145
2146 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2147 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2148
2149 /* Update CR2 in case it's a page-fault. */
2150 /** @todo This should probably be done much earlier in IEM/PGM. See
2151 * @bugref{5653#c49}. */
2152 if (fFlags & IEM_XCPT_FLAGS_CR2)
2153 pVCpu->cpum.GstCtx.cr2 = uCr2;
2154
2155 /*
2156 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2157 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2158 */
2159 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2160 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2161 if (uNewTSSLimit < uNewTSSLimitMin)
2162 {
2163 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2164 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2165 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2166 }
2167
2168 /*
2169     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2170 * The new TSS must have been read and validated (DPL, limits etc.) before a
2171 * task-switch VM-exit commences.
2172 *
2173 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2174 */
2175 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2176 {
2177 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2178 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2179 }
2180
2181 /*
2182 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2183 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2184 */
2185 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2186 {
2187        uint64_t const uExitInfo1 = SelTSS;
2188        uint64_t       uExitInfo2 = uErr;
2189 switch (enmTaskSwitch)
2190 {
2191 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2192 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2193 default: break;
2194 }
2195 if (fFlags & IEM_XCPT_FLAGS_ERR)
2196 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2197 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2198 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2199
2200 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2201 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2202 RT_NOREF2(uExitInfo1, uExitInfo2);
2203 }
2204
2205 /*
2206     * Check the current TSS limit. The last data written to the current TSS during the
2207     * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2208 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2209 *
2210     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2211 * end up with smaller than "legal" TSS limits.
2212 */
2213 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2214 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2215 if (uCurTSSLimit < uCurTSSLimitMin)
2216 {
2217 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2218 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2219 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2220 }
2221
2222 /*
2223 * Verify that the new TSS can be accessed and map it. Map only the required contents
2224 * and not the entire TSS.
2225 */
2226 void *pvNewTSS;
2227 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2228 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2229 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2230 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2231 * not perform correct translation if this happens. See Intel spec. 7.2.1
2232 * "Task-State Segment". */
2233 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2234 if (rcStrict != VINF_SUCCESS)
2235 {
2236 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2237 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2238 return rcStrict;
2239 }
2240
2241 /*
2242 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2243 */
2244 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2245 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2246 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2247 {
2248 PX86DESC pDescCurTSS;
2249 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2250 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2251 if (rcStrict != VINF_SUCCESS)
2252 {
2253            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2254 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2255 return rcStrict;
2256 }
2257
2258 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2259 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2260 if (rcStrict != VINF_SUCCESS)
2261 {
2262            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2263 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2264 return rcStrict;
2265 }
2266
2267 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2268 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2269 {
2270 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2271 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2272 u32EFlags &= ~X86_EFL_NT;
2273 }
2274 }
2275
2276 /*
2277 * Save the CPU state into the current TSS.
2278 */
2279 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2280 if (GCPtrNewTSS == GCPtrCurTSS)
2281 {
2282 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2283 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2284 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2285 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2286 pVCpu->cpum.GstCtx.ldtr.Sel));
2287 }
2288 if (fIsNewTSS386)
2289 {
2290 /*
2291 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2292 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2293 */
2294 void *pvCurTSS32;
2295 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2296 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2297 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2298 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2299 if (rcStrict != VINF_SUCCESS)
2300 {
2301 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2302 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2303 return rcStrict;
2304 }
2305
2306        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2307 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2308 pCurTSS32->eip = uNextEip;
2309 pCurTSS32->eflags = u32EFlags;
2310 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2311 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2312 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2313 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2314 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2315 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2316 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2317 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2318 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2319 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2320 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2321 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2322 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2323 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2324
2325 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2326 if (rcStrict != VINF_SUCCESS)
2327 {
2328 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2329 VBOXSTRICTRC_VAL(rcStrict)));
2330 return rcStrict;
2331 }
2332 }
2333 else
2334 {
2335 /*
2336 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2337 */
2338 void *pvCurTSS16;
2339 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2340 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2341 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2342 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2343 if (rcStrict != VINF_SUCCESS)
2344 {
2345 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2346 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2347 return rcStrict;
2348 }
2349
2350        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2351 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2352 pCurTSS16->ip = uNextEip;
2353 pCurTSS16->flags = u32EFlags;
2354 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2355 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2356 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2357 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2358 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2359 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2360 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2361 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2362 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2363 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2364 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2365 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2366
2367 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2368 if (rcStrict != VINF_SUCCESS)
2369 {
2370 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2371 VBOXSTRICTRC_VAL(rcStrict)));
2372 return rcStrict;
2373 }
2374 }
2375
2376 /*
2377 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2378 */
2379 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2380 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2381 {
2382 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2383 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2384 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2385 }
2386
2387 /*
2388 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2389 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2390 */
2391 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2392 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2393 bool fNewDebugTrap;
2394 if (fIsNewTSS386)
2395 {
2396 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2397 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2398 uNewEip = pNewTSS32->eip;
2399 uNewEflags = pNewTSS32->eflags;
2400 uNewEax = pNewTSS32->eax;
2401 uNewEcx = pNewTSS32->ecx;
2402 uNewEdx = pNewTSS32->edx;
2403 uNewEbx = pNewTSS32->ebx;
2404 uNewEsp = pNewTSS32->esp;
2405 uNewEbp = pNewTSS32->ebp;
2406 uNewEsi = pNewTSS32->esi;
2407 uNewEdi = pNewTSS32->edi;
2408 uNewES = pNewTSS32->es;
2409 uNewCS = pNewTSS32->cs;
2410 uNewSS = pNewTSS32->ss;
2411 uNewDS = pNewTSS32->ds;
2412 uNewFS = pNewTSS32->fs;
2413 uNewGS = pNewTSS32->gs;
2414 uNewLdt = pNewTSS32->selLdt;
2415 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2416 }
2417 else
2418 {
2419 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2420 uNewCr3 = 0;
2421 uNewEip = pNewTSS16->ip;
2422 uNewEflags = pNewTSS16->flags;
2423 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2424 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2425 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2426 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2427 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2428 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2429 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2430 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2431 uNewES = pNewTSS16->es;
2432 uNewCS = pNewTSS16->cs;
2433 uNewSS = pNewTSS16->ss;
2434 uNewDS = pNewTSS16->ds;
2435 uNewFS = 0;
2436 uNewGS = 0;
2437 uNewLdt = pNewTSS16->selLdt;
2438 fNewDebugTrap = false;
2439 }
2440
2441 if (GCPtrNewTSS == GCPtrCurTSS)
2442 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2443 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2444
2445 /*
2446 * We're done accessing the new TSS.
2447 */
2448 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2449 if (rcStrict != VINF_SUCCESS)
2450 {
2451 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2452 return rcStrict;
2453 }
2454
2455 /*
2456 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2457 */
2458 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2459 {
2460 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2461 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2462 if (rcStrict != VINF_SUCCESS)
2463 {
2464 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2465 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2466 return rcStrict;
2467 }
2468
2469 /* Check that the descriptor indicates the new TSS is available (not busy). */
2470 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2471 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2472 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2473
2474 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2475 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2476 if (rcStrict != VINF_SUCCESS)
2477 {
2478 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2479 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2480 return rcStrict;
2481 }
2482 }
2483
2484 /*
2485     * From this point on, we're technically in the new task. Exceptions raised from here are taken
2486     * after the task switch completes but before any instruction in the new task executes.
2487 */
2488 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2489 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2490 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2491 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2492 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2493 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2494 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2495
2496 /* Set the busy bit in TR. */
2497 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2498
2499 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2500 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2501 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2502 {
2503 uNewEflags |= X86_EFL_NT;
2504 }
2505
2506 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2507 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2508 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2509
2510 pVCpu->cpum.GstCtx.eip = uNewEip;
2511 pVCpu->cpum.GstCtx.eax = uNewEax;
2512 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2513 pVCpu->cpum.GstCtx.edx = uNewEdx;
2514 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2515 pVCpu->cpum.GstCtx.esp = uNewEsp;
2516 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2517 pVCpu->cpum.GstCtx.esi = uNewEsi;
2518 pVCpu->cpum.GstCtx.edi = uNewEdi;
2519
2520 uNewEflags &= X86_EFL_LIVE_MASK;
2521 uNewEflags |= X86_EFL_RA1_MASK;
2522 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2523
2524 /*
2525 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2526 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2527 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2528 */
2529 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2530 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2531
2532 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2533 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2534
2535 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2536 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2537
2538 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2539 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2540
2541 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2542 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2543
2544 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2545 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2546 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2547
2548 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2549 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2550 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2551 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2552
2553 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2554 {
2555 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2556 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2557 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2558 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2559 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2560 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2561 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2562 }
2563
2564 /*
2565 * Switch CR3 for the new task.
2566 */
2567 if ( fIsNewTSS386
2568 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2569 {
2570 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2571 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2572 AssertRCSuccessReturn(rc, rc);
2573
2574 /* Inform PGM. */
2575 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2576 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2577 AssertRCReturn(rc, rc);
2578 /* ignore informational status codes */
2579
2580 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2581 }
2582
2583 /*
2584 * Switch LDTR for the new task.
2585 */
2586 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2587 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2588 else
2589 {
2590 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2591
2592 IEMSELDESC DescNewLdt;
2593 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2594 if (rcStrict != VINF_SUCCESS)
2595 {
2596 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2597 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2598 return rcStrict;
2599 }
2600 if ( !DescNewLdt.Legacy.Gen.u1Present
2601 || DescNewLdt.Legacy.Gen.u1DescType
2602 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2603 {
2604 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2605 uNewLdt, DescNewLdt.Legacy.u));
2606 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2607 }
2608
2609 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2610 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2611 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2612 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2613 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2614 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2615 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2616 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2617 }
2618
2619 IEMSELDESC DescSS;
2620 if (IEM_IS_V86_MODE(pVCpu))
2621 {
2622 pVCpu->iem.s.uCpl = 3;
2623 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2624 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2625 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2626 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2627 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2628 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2629
2630 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2631 DescSS.Legacy.u = 0;
2632 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2633 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2634 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2635 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2636 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2637 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2638 DescSS.Legacy.Gen.u2Dpl = 3;
2639 }
2640 else
2641 {
2642 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2643
2644 /*
2645 * Load the stack segment for the new task.
2646 */
2647 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2648 {
2649 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2650 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2651 }
2652
2653 /* Fetch the descriptor. */
2654 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2655 if (rcStrict != VINF_SUCCESS)
2656 {
2657 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2658 VBOXSTRICTRC_VAL(rcStrict)));
2659 return rcStrict;
2660 }
2661
2662 /* SS must be a data segment and writable. */
2663 if ( !DescSS.Legacy.Gen.u1DescType
2664 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2665 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2666 {
2667 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2668 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2669 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2670 }
2671
2672 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2673 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2674 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2675 {
2676 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2677 uNewCpl));
2678 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2679 }
2680
2681 /* Is it there? */
2682 if (!DescSS.Legacy.Gen.u1Present)
2683 {
2684 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2685 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2686 }
2687
2688 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2689 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2690
2691 /* Set the accessed bit before committing the result into SS. */
2692 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2693 {
2694 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2695 if (rcStrict != VINF_SUCCESS)
2696 return rcStrict;
2697 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2698 }
2699
2700 /* Commit SS. */
2701 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2702 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2703 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2704 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2705 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2706 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2707 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2708
2709 /* CPL has changed, update IEM before loading rest of segments. */
2710 pVCpu->iem.s.uCpl = uNewCpl;
2711
2712 /*
2713 * Load the data segments for the new task.
2714 */
2715 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2716 if (rcStrict != VINF_SUCCESS)
2717 return rcStrict;
2718 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2719 if (rcStrict != VINF_SUCCESS)
2720 return rcStrict;
2721 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2722 if (rcStrict != VINF_SUCCESS)
2723 return rcStrict;
2724 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2725 if (rcStrict != VINF_SUCCESS)
2726 return rcStrict;
2727
2728 /*
2729 * Load the code segment for the new task.
2730 */
2731 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2732 {
2733 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2734 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2735 }
2736
2737 /* Fetch the descriptor. */
2738 IEMSELDESC DescCS;
2739 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2740 if (rcStrict != VINF_SUCCESS)
2741 {
2742 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2743 return rcStrict;
2744 }
2745
2746 /* CS must be a code segment. */
2747 if ( !DescCS.Legacy.Gen.u1DescType
2748 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2749 {
2750 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2751 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2752 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2753 }
2754
2755 /* For conforming CS, DPL must be less than or equal to the RPL. */
2756 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2757 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2758 {
2759            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2760 DescCS.Legacy.Gen.u2Dpl));
2761 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2762 }
2763
2764 /* For non-conforming CS, DPL must match RPL. */
2765 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2766 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2767 {
2768            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2769 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2770 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2771 }
2772
2773 /* Is it there? */
2774 if (!DescCS.Legacy.Gen.u1Present)
2775 {
2776 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2777 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2778 }
2779
2780 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2781 u64Base = X86DESC_BASE(&DescCS.Legacy);
2782
2783 /* Set the accessed bit before committing the result into CS. */
2784 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2785 {
2786 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2787 if (rcStrict != VINF_SUCCESS)
2788 return rcStrict;
2789 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2790 }
2791
2792 /* Commit CS. */
2793 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2794 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2795 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2796 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2797 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2798 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2799 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2800 }
2801
2802 /** @todo Debug trap. */
2803 if (fIsNewTSS386 && fNewDebugTrap)
2804 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2805
2806 /*
2807 * Construct the error code masks based on what caused this task switch.
2808 * See Intel Instruction reference for INT.
2809 */
2810 uint16_t uExt;
2811 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2812 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2813 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2814 {
2815 uExt = 1;
2816 }
2817 else
2818 uExt = 0;
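
    /* Editorial example: a hardware exception (e.g. #NP) dispatched through a task gate arrives
       here without IEM_XCPT_FLAGS_T_SOFT_INT, so uExt = 1 and the EXT bit is set in the error
       code of any #SS/#GP raised by the checks below; a plain INT n (no ICEBP) gives uExt = 0. */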
2819
2820 /*
2821 * Push any error code on to the new stack.
2822 */
2823 if (fFlags & IEM_XCPT_FLAGS_ERR)
2824 {
2825 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2826 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2827 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2828
2829 /* Check that there is sufficient space on the stack. */
2830 /** @todo Factor out segment limit checking for normal/expand down segments
2831 * into a separate function. */
2832 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2833 {
2834 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2835 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2836 {
2837 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2838 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2839 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2840 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2841 }
2842 }
2843 else
2844 {
2845 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2846 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2847 {
2848 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2849 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2850 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2851 }
2852 }
2853
2854
2855 if (fIsNewTSS386)
2856 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2857 else
2858 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2859 if (rcStrict != VINF_SUCCESS)
2860 {
2861 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2862 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2863 return rcStrict;
2864 }
2865 }
2866
2867 /* Check the new EIP against the new CS limit. */
2868 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2869 {
2870        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2871 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2872 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2873 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2874 }
2875
2876 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2877 pVCpu->cpum.GstCtx.ss.Sel));
2878 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2879}
2880
2881
2882/**
2883 * Implements exceptions and interrupts for protected mode.
2884 *
2885 * @returns VBox strict status code.
2886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2887 * @param cbInstr The number of bytes to offset rIP by in the return
2888 * address.
2889 * @param u8Vector The interrupt / exception vector number.
2890 * @param fFlags The flags.
2891 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2892 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2893 */
2894static VBOXSTRICTRC
2895iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2896 uint8_t cbInstr,
2897 uint8_t u8Vector,
2898 uint32_t fFlags,
2899 uint16_t uErr,
2900 uint64_t uCr2) RT_NOEXCEPT
2901{
2902 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2903
2904 /*
2905 * Read the IDT entry.
2906 */
2907 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2908 {
2909 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2910 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2911 }
2912 X86DESC Idte;
2913 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2914 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2915 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2916 {
2917 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2918 return rcStrict;
2919 }
2920 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2921 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2922 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2923
2924 /*
2925 * Check the descriptor type, DPL and such.
2926 * ASSUMES this is done in the same order as described for call-gate calls.
2927 */
2928 if (Idte.Gate.u1DescType)
2929 {
2930 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2931 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2932 }
2933 bool fTaskGate = false;
2934 uint8_t f32BitGate = true;
2935 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2936 switch (Idte.Gate.u4Type)
2937 {
2938 case X86_SEL_TYPE_SYS_UNDEFINED:
2939 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2940 case X86_SEL_TYPE_SYS_LDT:
2941 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2942 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2943 case X86_SEL_TYPE_SYS_UNDEFINED2:
2944 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2945 case X86_SEL_TYPE_SYS_UNDEFINED3:
2946 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2947 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2948 case X86_SEL_TYPE_SYS_UNDEFINED4:
2949 {
2950 /** @todo check what actually happens when the type is wrong...
2951 * esp. call gates. */
2952 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2953 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2954 }
2955
2956 case X86_SEL_TYPE_SYS_286_INT_GATE:
2957 f32BitGate = false;
2958 RT_FALL_THRU();
2959 case X86_SEL_TYPE_SYS_386_INT_GATE:
2960 fEflToClear |= X86_EFL_IF;
2961 break;
2962
2963 case X86_SEL_TYPE_SYS_TASK_GATE:
2964 fTaskGate = true;
2965#ifndef IEM_IMPLEMENTS_TASKSWITCH
2966 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2967#endif
2968 break;
2969
2970 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2971 f32BitGate = false;
2972 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2973 break;
2974
2975 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2976 }
2977
2978 /* Check DPL against CPL if applicable. */
2979 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2980 {
2981 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2982 {
2983 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2984 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2985 }
2986 }
2987
2988 /* Is it there? */
2989 if (!Idte.Gate.u1Present)
2990 {
2991 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2992 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2993 }
2994
2995 /* Is it a task-gate? */
2996 if (fTaskGate)
2997 {
2998 /*
2999 * Construct the error code masks based on what caused this task switch.
3000 * See Intel Instruction reference for INT.
3001 */
3002 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3003 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3004 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3005 RTSEL SelTSS = Idte.Gate.u16Sel;
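 /* Error code sketch: with SelTSS = 0x002B, a descriptor problem hit during a
    hardware interrupt (uExt=1) would be reported with error code 0x0029
    ((0x2B & 0xfff8) | 1), while a plain INT n would report 0x0028. */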
3006
3007 /*
3008 * Fetch the TSS descriptor in the GDT.
3009 */
3010 IEMSELDESC DescTSS;
3011 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3012 if (rcStrict != VINF_SUCCESS)
3013 {
3014 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3015 VBOXSTRICTRC_VAL(rcStrict)));
3016 return rcStrict;
3017 }
3018
3019 /* The TSS descriptor must be a system segment and be available (not busy). */
3020 if ( DescTSS.Legacy.Gen.u1DescType
3021 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3022 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3023 {
3024 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3025 u8Vector, SelTSS, DescTSS.Legacy.au64));
3026 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3027 }
3028
3029 /* The TSS must be present. */
3030 if (!DescTSS.Legacy.Gen.u1Present)
3031 {
3032 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3033 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3034 }
3035
3036 /* Do the actual task switch. */
3037 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3038 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3039 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3040 }
3041
3042 /* A null CS is bad. */
3043 RTSEL NewCS = Idte.Gate.u16Sel;
3044 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3045 {
3046 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3047 return iemRaiseGeneralProtectionFault0(pVCpu);
3048 }
3049
3050 /* Fetch the descriptor for the new CS. */
3051 IEMSELDESC DescCS;
3052 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3053 if (rcStrict != VINF_SUCCESS)
3054 {
3055 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3056 return rcStrict;
3057 }
3058
3059 /* Must be a code segment. */
3060 if (!DescCS.Legacy.Gen.u1DescType)
3061 {
3062 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3063 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3064 }
3065 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3066 {
3067 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3068 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3069 }
3070
3071 /* Don't allow lowering the privilege level. */
3072 /** @todo Does the lowering of privileges apply to software interrupts
3073 * only? This has bearings on the more-privileged or
3074 * same-privilege stack behavior further down. A testcase would
3075 * be nice. */
3076 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3077 {
3078 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3079 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3080 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3081 }
3082
3083 /* Make sure the selector is present. */
3084 if (!DescCS.Legacy.Gen.u1Present)
3085 {
3086 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3087 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3088 }
3089
3090 /* Check the new EIP against the new CS limit. */
3091 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3092 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3093 ? Idte.Gate.u16OffsetLow
3094 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3095 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3096 if (uNewEip > cbLimitCS)
3097 {
3098 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3099 u8Vector, uNewEip, cbLimitCS, NewCS));
3100 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3101 }
3102 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3103
3104 /* Calc the flag image to push. */
3105 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3106 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3107 fEfl &= ~X86_EFL_RF;
3108 else
3109 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3110
3111 /* From V8086 mode only go to CPL 0. */
3112 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3113 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3114 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3115 {
3116 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3117 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3118 }
3119
3120 /*
3121 * If the privilege level changes, we need to get a new stack from the TSS.
3122 * This in turn means validating the new SS and ESP...
3123 */
3124 if (uNewCpl != pVCpu->iem.s.uCpl)
3125 {
3126 RTSEL NewSS;
3127 uint32_t uNewEsp;
3128 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3129 if (rcStrict != VINF_SUCCESS)
3130 return rcStrict;
3131
3132 IEMSELDESC DescSS;
3133 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3134 if (rcStrict != VINF_SUCCESS)
3135 return rcStrict;
3136 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3137 if (!DescSS.Legacy.Gen.u1DefBig)
3138 {
3139 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3140 uNewEsp = (uint16_t)uNewEsp;
3141 }
3142
3143 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3144
3145 /* Check that there is sufficient space for the stack frame. */
3146 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3147 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3148 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3149 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
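 /* Worked example: a privilege change through a 32-bit gate pushes SS, ESP,
    EFLAGS, CS and EIP (5 dwords = 20 bytes), plus 4 more if an error code is
    present; when leaving V8086 mode GS, FS, DS and ES are pushed too, giving
    36 or 40 bytes. The 16-bit gate variants are half of that, which is what
    the shift by f32BitGate expresses. */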
3150
3151 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3152 {
3153 if ( uNewEsp - 1 > cbLimitSS
3154 || uNewEsp < cbStackFrame)
3155 {
3156 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3157 u8Vector, NewSS, uNewEsp, cbStackFrame));
3158 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3159 }
3160 }
3161 else
3162 {
3163 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3164 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3165 {
3166 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3167 u8Vector, NewSS, uNewEsp, cbStackFrame));
3168 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3169 }
3170 }
3171
3172 /*
3173 * Start making changes.
3174 */
3175
3176 /* Set the new CPL so that stack accesses use it. */
3177 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3178 pVCpu->iem.s.uCpl = uNewCpl;
3179
3180 /* Create the stack frame. */
3181 RTPTRUNION uStackFrame;
3182 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3183 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3184 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3185 if (rcStrict != VINF_SUCCESS)
3186 return rcStrict;
3187 void * const pvStackFrame = uStackFrame.pv;
3188 if (f32BitGate)
3189 {
3190 if (fFlags & IEM_XCPT_FLAGS_ERR)
3191 *uStackFrame.pu32++ = uErr;
3192 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3193 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3194 uStackFrame.pu32[2] = fEfl;
3195 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3196 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3197 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3198 if (fEfl & X86_EFL_VM)
3199 {
3200 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3201 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3202 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3203 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3204 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3205 }
3206 }
3207 else
3208 {
3209 if (fFlags & IEM_XCPT_FLAGS_ERR)
3210 *uStackFrame.pu16++ = uErr;
3211 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3212 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3213 uStackFrame.pu16[2] = fEfl;
3214 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3215 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3216 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3217 if (fEfl & X86_EFL_VM)
3218 {
3219 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3220 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3221 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3222 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3223 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3224 }
3225 }
3226 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3227 if (rcStrict != VINF_SUCCESS)
3228 return rcStrict;
3229
3230 /* Mark the selectors 'accessed' (hope this is the correct time). */
3231 * @todo testcase: exactly _when_ are the accessed bits set - before or
3232 * after pushing the stack frame? (Write protect the gdt + stack to
3233 * find out.) */
3234 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3235 {
3236 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3237 if (rcStrict != VINF_SUCCESS)
3238 return rcStrict;
3239 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3240 }
3241
3242 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3243 {
3244 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3245 if (rcStrict != VINF_SUCCESS)
3246 return rcStrict;
3247 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3248 }
3249
3250 /*
3251 * Start committing the register changes (joins with the DPL=CPL branch).
3252 */
3253 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3254 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3255 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3256 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3257 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3258 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3259 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3260 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3261 * SP is loaded).
3262 * Need to check the other combinations too:
3263 * - 16-bit TSS, 32-bit handler
3264 * - 32-bit TSS, 16-bit handler */
3265 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3266 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3267 else
3268 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3269
3270 if (fEfl & X86_EFL_VM)
3271 {
3272 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3273 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3274 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3275 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3276 }
3277 }
3278 /*
3279 * Same privilege, no stack change and smaller stack frame.
3280 */
3281 else
3282 {
3283 uint64_t uNewRsp;
3284 RTPTRUNION uStackFrame;
3285 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
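 /* E.g. a same-privilege 32-bit gate without an error code only pushes
    EFLAGS, CS and EIP: 6 << 1 = 12 bytes; with an error code it is 16 bytes,
    and the 16-bit gate variants come to 6 resp. 8 bytes. */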
3286 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3287 if (rcStrict != VINF_SUCCESS)
3288 return rcStrict;
3289 void * const pvStackFrame = uStackFrame.pv;
3290
3291 if (f32BitGate)
3292 {
3293 if (fFlags & IEM_XCPT_FLAGS_ERR)
3294 *uStackFrame.pu32++ = uErr;
3295 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3296 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3297 uStackFrame.pu32[2] = fEfl;
3298 }
3299 else
3300 {
3301 if (fFlags & IEM_XCPT_FLAGS_ERR)
3302 *uStackFrame.pu16++ = uErr;
3303 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3304 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3305 uStackFrame.pu16[2] = fEfl;
3306 }
3307 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3308 if (rcStrict != VINF_SUCCESS)
3309 return rcStrict;
3310
3311 /* Mark the CS selector as 'accessed'. */
3312 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3313 {
3314 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3315 if (rcStrict != VINF_SUCCESS)
3316 return rcStrict;
3317 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3318 }
3319
3320 /*
3321 * Start committing the register changes (joins with the other branch).
3322 */
3323 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3324 }
3325
3326 /* ... register committing continues. */
3327 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3328 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3329 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3330 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3331 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3332 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3333
3334 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3335 fEfl &= ~fEflToClear;
3336 IEMMISC_SET_EFL(pVCpu, fEfl);
3337
3338 if (fFlags & IEM_XCPT_FLAGS_CR2)
3339 pVCpu->cpum.GstCtx.cr2 = uCr2;
3340
3341 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3342 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3343
3344 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3345}
3346
3347
3348/**
3349 * Implements exceptions and interrupts for long mode.
3350 *
3351 * @returns VBox strict status code.
3352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3353 * @param cbInstr The number of bytes to offset rIP by in the return
3354 * address.
3355 * @param u8Vector The interrupt / exception vector number.
3356 * @param fFlags The flags.
3357 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3358 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3359 */
3360static VBOXSTRICTRC
3361iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3362 uint8_t cbInstr,
3363 uint8_t u8Vector,
3364 uint32_t fFlags,
3365 uint16_t uErr,
3366 uint64_t uCr2) RT_NOEXCEPT
3367{
3368 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3369
3370 /*
3371 * Read the IDT entry.
3372 */
3373 uint16_t offIdt = (uint16_t)u8Vector << 4;
3374 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3375 {
3376 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3377 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3378 }
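 /* Note: long mode IDT entries are 16 bytes (hence the '<< 4' above), so for
    e.g. vector 0x0e the two qword fetches below read offsets 0xe0 and 0xe8. */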
3379 X86DESC64 Idte;
3380#ifdef _MSC_VER /* Shut up silly compiler warning. */
3381 Idte.au64[0] = 0;
3382 Idte.au64[1] = 0;
3383#endif
3384 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3385 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3386 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3387 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3388 {
3389 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3390 return rcStrict;
3391 }
3392 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3393 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3394 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3395
3396 /*
3397 * Check the descriptor type, DPL and such.
3398 * ASSUMES this is done in the same order as described for call-gate calls.
3399 */
3400 if (Idte.Gate.u1DescType)
3401 {
3402 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3403 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3404 }
3405 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3406 switch (Idte.Gate.u4Type)
3407 {
3408 case AMD64_SEL_TYPE_SYS_INT_GATE:
3409 fEflToClear |= X86_EFL_IF;
3410 break;
3411 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3412 break;
3413
3414 default:
3415 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3416 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3417 }
3418
3419 /* Check DPL against CPL if applicable. */
3420 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3421 {
3422 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3423 {
3424 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3425 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3426 }
3427 }
3428
3429 /* Is it there? */
3430 if (!Idte.Gate.u1Present)
3431 {
3432 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3433 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3434 }
3435
3436 /* A null CS is bad. */
3437 RTSEL NewCS = Idte.Gate.u16Sel;
3438 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3439 {
3440 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3441 return iemRaiseGeneralProtectionFault0(pVCpu);
3442 }
3443
3444 /* Fetch the descriptor for the new CS. */
3445 IEMSELDESC DescCS;
3446 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3447 if (rcStrict != VINF_SUCCESS)
3448 {
3449 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3450 return rcStrict;
3451 }
3452
3453 /* Must be a 64-bit code segment. */
3454 if (!DescCS.Long.Gen.u1DescType)
3455 {
3456 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3457 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3458 }
3459 if ( !DescCS.Long.Gen.u1Long
3460 || DescCS.Long.Gen.u1DefBig
3461 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3462 {
3463 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3464 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3465 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3466 }
3467
3468 /* Don't allow lowering the privilege level. For non-conforming CS
3469 selectors, the CS.DPL sets the privilege level the trap/interrupt
3470 handler runs at. For conforming CS selectors, the CPL remains
3471 unchanged, but the CS.DPL must be <= CPL. */
3472 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3473 * when CPU in Ring-0. Result \#GP? */
3474 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3475 {
3476 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3477 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3478 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3479 }
3480
3481
3482 /* Make sure the selector is present. */
3483 if (!DescCS.Legacy.Gen.u1Present)
3484 {
3485 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3486 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3487 }
3488
3489 /* Check that the new RIP is canonical. */
3490 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3491 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3492 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3493 if (!IEM_IS_CANONICAL(uNewRip))
3494 {
3495 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3496 return iemRaiseGeneralProtectionFault0(pVCpu);
3497 }
3498
3499 /*
3500 * If the privilege level changes or if the IST isn't zero, we need to get
3501 * a new stack from the TSS.
3502 */
3503 uint64_t uNewRsp;
3504 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3505 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3506 if ( uNewCpl != pVCpu->iem.s.uCpl
3507 || Idte.Gate.u3IST != 0)
3508 {
3509 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3510 if (rcStrict != VINF_SUCCESS)
3511 return rcStrict;
3512 }
3513 else
3514 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3515 uNewRsp &= ~(uint64_t)0xf;
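 /* Long mode aligns the stack down to a 16 byte boundary before pushing the
    frame, e.g. an incoming RSP of 0x7ffe38 becomes 0x7ffe30 at this point. */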
3516
3517 /*
3518 * Calc the flag image to push.
3519 */
3520 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3521 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3522 fEfl &= ~X86_EFL_RF;
3523 else
3524 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3525
3526 /*
3527 * Start making changes.
3528 */
3529 /* Set the new CPL so that stack accesses use it. */
3530 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3531 pVCpu->iem.s.uCpl = uNewCpl;
3532
3533 /* Create the stack frame. */
3534 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
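 /* The 64-bit frame is always SS, RSP, RFLAGS, CS and RIP (5 qwords = 40
    bytes), or 48 bytes when an error code is pushed as well. */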
3535 RTPTRUNION uStackFrame;
3536 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3537 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3538 if (rcStrict != VINF_SUCCESS)
3539 return rcStrict;
3540 void * const pvStackFrame = uStackFrame.pv;
3541
3542 if (fFlags & IEM_XCPT_FLAGS_ERR)
3543 *uStackFrame.pu64++ = uErr;
3544 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3545 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3546 uStackFrame.pu64[2] = fEfl;
3547 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3548 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3549 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3550 if (rcStrict != VINF_SUCCESS)
3551 return rcStrict;
3552
3553 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3554 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3555 * after pushing the stack frame? (Write protect the gdt + stack to
3556 * find out.) */
3557 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3558 {
3559 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3560 if (rcStrict != VINF_SUCCESS)
3561 return rcStrict;
3562 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3563 }
3564
3565 /*
3566 * Start committing the register changes.
3567 */
3568 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3569 * hidden registers when interrupting 32-bit or 16-bit code! */
3570 if (uNewCpl != uOldCpl)
3571 {
3572 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3573 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3574 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3575 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3576 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3577 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3578 }
3579 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3580 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3581 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3582 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3583 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3584 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3585 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3586 pVCpu->cpum.GstCtx.rip = uNewRip;
3587
3588 fEfl &= ~fEflToClear;
3589 IEMMISC_SET_EFL(pVCpu, fEfl);
3590
3591 if (fFlags & IEM_XCPT_FLAGS_CR2)
3592 pVCpu->cpum.GstCtx.cr2 = uCr2;
3593
3594 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3595 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3596
3597 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3598}
3599
3600
3601/**
3602 * Implements exceptions and interrupts.
3603 *
3604 * All exceptions and interrupts go through this function!
3605 *
3606 * @returns VBox strict status code.
3607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3608 * @param cbInstr The number of bytes to offset rIP by in the return
3609 * address.
3610 * @param u8Vector The interrupt / exception vector number.
3611 * @param fFlags The flags.
3612 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3613 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3614 */
3615VBOXSTRICTRC
3616iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3617 uint8_t cbInstr,
3618 uint8_t u8Vector,
3619 uint32_t fFlags,
3620 uint16_t uErr,
3621 uint64_t uCr2) RT_NOEXCEPT
3622{
3623 /*
3624 * Get all the state that we might need here.
3625 */
3626 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3627 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3628
3629#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3630 /*
3631 * Flush prefetch buffer
3632 */
3633 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3634#endif
3635
3636 /*
3637 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3638 */
3639 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3640 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3641 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3642 | IEM_XCPT_FLAGS_BP_INSTR
3643 | IEM_XCPT_FLAGS_ICEBP_INSTR
3644 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3645 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3646 {
3647 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3648 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3649 u8Vector = X86_XCPT_GP;
3650 uErr = 0;
3651 }
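 /* Example: INT 21h executed in V8086 mode with IOPL=0 is converted into a
    #GP(0) here rather than dispatched through the IDT; INT3, INTO and ICEBP
    are exempt from this check, as is the IOPL=3 case. */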
3652#ifdef DBGFTRACE_ENABLED
3653 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3654 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3655 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3656#endif
3657
3658 /*
3659 * Evaluate whether NMI blocking should be in effect.
3660 * Normally, NMI blocking is in effect whenever we inject an NMI.
3661 */
3662 bool fBlockNmi;
3663 if ( u8Vector == X86_XCPT_NMI
3664 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3665 fBlockNmi = true;
3666 else
3667 fBlockNmi = false;
3668
3669#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3670 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3671 {
3672 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3673 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3674 return rcStrict0;
3675
3676 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3677 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3678 {
3679 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3680 fBlockNmi = false;
3681 }
3682 }
3683#endif
3684
3685#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3686 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3687 {
3688 /*
3689 * If the event is being injected as part of VMRUN, it isn't subject to event
3690 * intercepts in the nested-guest. However, secondary exceptions that occur
3691 * during injection of any event -are- subject to exception intercepts.
3692 *
3693 * See AMD spec. 15.20 "Event Injection".
3694 */
3695 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3696 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3697 else
3698 {
3699 /*
3700 * Check and handle if the event being raised is intercepted.
3701 */
3702 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3703 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3704 return rcStrict0;
3705 }
3706 }
3707#endif
3708
3709 /*
3710 * Set NMI blocking if necessary.
3711 */
3712 if ( fBlockNmi
3713 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3714 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3715
3716 /*
3717 * Do recursion accounting.
3718 */
3719 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3720 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3721 if (pVCpu->iem.s.cXcptRecursions == 0)
3722 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3723 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3724 else
3725 {
3726 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3727 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3728 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3729
3730 if (pVCpu->iem.s.cXcptRecursions >= 4)
3731 {
3732#ifdef DEBUG_bird
3733 AssertFailed();
3734#endif
3735 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3736 }
3737
3738 /*
3739 * Evaluate the sequence of recurring events.
3740 */
3741 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3742 NULL /* pXcptRaiseInfo */);
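 /* E.g. a #GP raised while delivering a #PF normally evaluates to
    IEMXCPTRAISE_DOUBLE_FAULT, while a fault hit during #DF delivery
    evaluates to IEMXCPTRAISE_TRIPLE_FAULT and shuts the VCPU down. */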
3743 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3744 { /* likely */ }
3745 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3746 {
3747 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3748 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3749 u8Vector = X86_XCPT_DF;
3750 uErr = 0;
3751#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3752 /* VMX nested-guest #DF intercept needs to be checked here. */
3753 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3754 {
3755 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3756 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3757 return rcStrict0;
3758 }
3759#endif
3760 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3761 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3762 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3763 }
3764 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3765 {
3766 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3767 return iemInitiateCpuShutdown(pVCpu);
3768 }
3769 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3770 {
3771 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3772 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3773 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3774 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3775 return VERR_EM_GUEST_CPU_HANG;
3776 }
3777 else
3778 {
3779 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3780 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3781 return VERR_IEM_IPE_9;
3782 }
3783
3784 /*
3785 * The 'EXT' bit is set when an exception occurs during delivery of an external
3786 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3787 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
3788 * INT n, INTO and INT3 software interrupt instructions, the 'EXT' bit is not set[3].
3789 *
3790 * [1] - Intel spec. 6.13 "Error Code"
3791 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3792 * [3] - Intel Instruction reference for INT n.
3793 */
3794 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3795 && (fFlags & IEM_XCPT_FLAGS_ERR)
3796 && u8Vector != X86_XCPT_PF
3797 && u8Vector != X86_XCPT_DF)
3798 {
3799 uErr |= X86_TRAP_ERR_EXTERNAL;
3800 }
3801 }
3802
3803 pVCpu->iem.s.cXcptRecursions++;
3804 pVCpu->iem.s.uCurXcpt = u8Vector;
3805 pVCpu->iem.s.fCurXcpt = fFlags;
3806 pVCpu->iem.s.uCurXcptErr = uErr;
3807 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3808
3809 /*
3810 * Extensive logging.
3811 */
3812#if defined(LOG_ENABLED) && defined(IN_RING3)
3813 if (LogIs3Enabled())
3814 {
3815 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3816 PVM pVM = pVCpu->CTX_SUFF(pVM);
3817 char szRegs[4096];
3818 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3819 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3820 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3821 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3822 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3823 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3824 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3825 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3826 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3827 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3828 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3829 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3830 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3831 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3832 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3833 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3834 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3835 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3836 " efer=%016VR{efer}\n"
3837 " pat=%016VR{pat}\n"
3838 " sf_mask=%016VR{sf_mask}\n"
3839 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3840 " lstar=%016VR{lstar}\n"
3841 " star=%016VR{star} cstar=%016VR{cstar}\n"
3842 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3843 );
3844
3845 char szInstr[256];
3846 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3847 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3848 szInstr, sizeof(szInstr), NULL);
3849 Log3(("%s%s\n", szRegs, szInstr));
3850 }
3851#endif /* LOG_ENABLED */
3852
3853 /*
3854 * Stats.
3855 */
3856 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3857 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
3858 else if (u8Vector <= X86_XCPT_LAST)
3859 {
3860 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
3861 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
3862 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
3863 }
3864
3865 /*
3866 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3),
3867 * ensuring that a stale TLB or paging-structure cache entry will only cause
3868 * one spurious #PF.
3869 */
3870 if ( u8Vector == X86_XCPT_PF
3871 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
3872 IEMTlbInvalidatePage(pVCpu, uCr2);
3873
3874 /*
3875 * Call the mode specific worker function.
3876 */
3877 VBOXSTRICTRC rcStrict;
3878 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3879 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3880 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3881 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3882 else
3883 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3884
3885 /* Flush the prefetch buffer. */
3886#ifdef IEM_WITH_CODE_TLB
3887 pVCpu->iem.s.pbInstrBuf = NULL;
3888#else
3889 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3890#endif
3891
3892 /*
3893 * Unwind.
3894 */
3895 pVCpu->iem.s.cXcptRecursions--;
3896 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3897 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3898 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3899 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3900 pVCpu->iem.s.cXcptRecursions + 1));
3901 return rcStrict;
3902}
3903
3904#ifdef IEM_WITH_SETJMP
3905/**
3906 * See iemRaiseXcptOrInt. Will not return.
3907 */
3908DECL_NO_RETURN(void)
3909iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3910 uint8_t cbInstr,
3911 uint8_t u8Vector,
3912 uint32_t fFlags,
3913 uint16_t uErr,
3914 uint64_t uCr2) RT_NOEXCEPT
3915{
3916 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3917 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3918}
3919#endif
3920
3921
3922/** \#DE - 00. */
3923VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3924{
3925 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3926}
3927
3928
3929/** \#DB - 01.
3930 * @note This automatically clears DR7.GD. */
3931VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3932{
3933 /** @todo set/clear RF. */
3934 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3935 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3936}
3937
3938
3939/** \#BR - 05. */
3940VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3941{
3942 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3943}
3944
3945
3946/** \#UD - 06. */
3947VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3948{
3949 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3950}
3951
3952
3953/** \#NM - 07. */
3954VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3955{
3956 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3957}
3958
3959
3960/** \#TS(err) - 0a. */
3961VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3962{
3963 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3964}
3965
3966
3967/** \#TS(tr) - 0a. */
3968VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3969{
3970 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3971 pVCpu->cpum.GstCtx.tr.Sel, 0);
3972}
3973
3974
3975/** \#TS(0) - 0a. */
3976VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3977{
3978 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3979 0, 0);
3980}
3981
3982
3983/** \#TS(err) - 0a. */
3984VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3985{
3986 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3987 uSel & X86_SEL_MASK_OFF_RPL, 0);
3988}
3989
3990
3991/** \#NP(err) - 0b. */
3992VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3993{
3994 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3995}
3996
3997
3998/** \#NP(sel) - 0b. */
3999VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4000{
4001 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4002 uSel & ~X86_SEL_RPL, 0);
4003}
4004
4005
4006/** \#SS(seg) - 0c. */
4007VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4008{
4009 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4010 uSel & ~X86_SEL_RPL, 0);
4011}
4012
4013
4014/** \#SS(err) - 0c. */
4015VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4016{
4017 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4018}
4019
4020
4021/** \#GP(n) - 0d. */
4022VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4023{
4024 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4025}
4026
4027
4028/** \#GP(0) - 0d. */
4029VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4030{
4031 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4032}
4033
4034#ifdef IEM_WITH_SETJMP
4035/** \#GP(0) - 0d. */
4036DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4037{
4038 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4039}
4040#endif
4041
4042
4043/** \#GP(sel) - 0d. */
4044VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4045{
4046 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4047 Sel & ~X86_SEL_RPL, 0);
4048}
4049
4050
4051/** \#GP(0) - 0d. */
4052VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4053{
4054 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4055}
4056
4057
4058/** \#GP(sel) - 0d. */
4059VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4060{
4061 NOREF(iSegReg); NOREF(fAccess);
4062 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4063 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4064}
4065
4066#ifdef IEM_WITH_SETJMP
4067/** \#GP(sel) - 0d, longjmp. */
4068DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4069{
4070 NOREF(iSegReg); NOREF(fAccess);
4071 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4072 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4073}
4074#endif
4075
4076/** \#GP(sel) - 0d. */
4077VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4078{
4079 NOREF(Sel);
4080 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4081}
4082
4083#ifdef IEM_WITH_SETJMP
4084/** \#GP(sel) - 0d, longjmp. */
4085DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4086{
4087 NOREF(Sel);
4088 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4089}
4090#endif
4091
4092
4093/** \#GP(sel) - 0d. */
4094VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4095{
4096 NOREF(iSegReg); NOREF(fAccess);
4097 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4098}
4099
4100#ifdef IEM_WITH_SETJMP
4101/** \#GP(sel) - 0d, longjmp. */
4102DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4103{
4104 NOREF(iSegReg); NOREF(fAccess);
4105 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4106}
4107#endif
4108
4109
4110/** \#PF(n) - 0e. */
4111VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4112{
4113 uint16_t uErr;
4114 switch (rc)
4115 {
4116 case VERR_PAGE_NOT_PRESENT:
4117 case VERR_PAGE_TABLE_NOT_PRESENT:
4118 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4119 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4120 uErr = 0;
4121 break;
4122
4123 default:
4124 AssertMsgFailed(("%Rrc\n", rc));
4125 RT_FALL_THRU();
4126 case VERR_ACCESS_DENIED:
4127 uErr = X86_TRAP_PF_P;
4128 break;
4129
4130 /** @todo reserved */
4131 }
4132
4133 if (pVCpu->iem.s.uCpl == 3)
4134 uErr |= X86_TRAP_PF_US;
4135
4136 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4137 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4138 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4139 uErr |= X86_TRAP_PF_ID;
4140
4141#if 0 /* This is so much nonsense, really. Why was it done like that? */
4142 /* Note! RW access callers reporting a WRITE protection fault, will clear
4143 the READ flag before calling. So, read-modify-write accesses (RW)
4144 can safely be reported as READ faults. */
4145 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4146 uErr |= X86_TRAP_PF_RW;
4147#else
4148 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4149 {
4150 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4151 /// (regardless of outcome of the comparison in the latter case).
4152 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4153 uErr |= X86_TRAP_PF_RW;
4154 }
4155#endif
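 /* Resulting error code sketch: a ring-3 write to a present read-only page
    yields P|US|RW = 0x7, while a supervisor instruction fetch from a
    non-present page with PAE+NXE yields just ID = 0x10. */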
4156
4157 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4158 uErr, GCPtrWhere);
4159}
4160
4161#ifdef IEM_WITH_SETJMP
4162/** \#PF(n) - 0e, longjmp. */
4163DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4164{
4165 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4166}
4167#endif
4168
4169
4170/** \#MF(0) - 10. */
4171VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu)
4172{
4173 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4174}
4175
4176
4177/** \#AC(0) - 11. */
4178VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
4179{
4180 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4181}
4182
4183#ifdef IEM_WITH_SETJMP
4184/** \#AC(0) - 11, longjmp. */
4185DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4186{
4187 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4188}
4189#endif
4190
4191
4192/** \#XF(0)/\#XM(0) - 19. */
4193VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4194{
4195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4196}
4197
4198
4199/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4200IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4201{
4202 NOREF(cbInstr);
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4204}
4205
4206
4207/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4208IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4209{
4210 NOREF(cbInstr);
4211 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4212}
4213
4214
4215/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4216IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4217{
4218 NOREF(cbInstr);
4219 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4220}
4221
4222
4223/** @} */
4224
4225/** @name Common opcode decoders.
4226 * @{
4227 */
4228//#include <iprt/mem.h>
4229
4230/**
4231 * Used to add extra details about a stub case.
4232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4233 */
4234void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4235{
4236#if defined(LOG_ENABLED) && defined(IN_RING3)
4237 PVM pVM = pVCpu->CTX_SUFF(pVM);
4238 char szRegs[4096];
4239 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4240 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4241 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4242 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4243 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4244 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4245 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4246 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4247 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4248 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4249 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4250 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4251 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4252 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4253 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4254 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4255 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4256 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4257 " efer=%016VR{efer}\n"
4258 " pat=%016VR{pat}\n"
4259 " sf_mask=%016VR{sf_mask}\n"
4260 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4261 " lstar=%016VR{lstar}\n"
4262 " star=%016VR{star} cstar=%016VR{cstar}\n"
4263 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4264 );
4265
4266 char szInstr[256];
4267 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4268 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4269 szInstr, sizeof(szInstr), NULL);
4270
4271 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4272#else
4273 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4274#endif
4275}
4276
4277/** @} */
4278
4279
4280
4281/** @name Register Access.
4282 * @{
4283 */
4284
4285/**
4286 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4287 *
4288 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4289 * segment limit.
4290 *
4291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4292 * @param offNextInstr The offset of the next instruction.
4293 */
4294VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4295{
4296 switch (pVCpu->iem.s.enmEffOpSize)
4297 {
4298 case IEMMODE_16BIT:
4299 {
4300 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
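 /* Note: the 16-bit addition wraps modulo 64K (e.g. IP=0xfffe advanced by 4
    bytes in total wraps to 0x0002), and the result is then checked against
    the CS limit below. */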
4301 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4302 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4303 return iemRaiseGeneralProtectionFault0(pVCpu);
4304 pVCpu->cpum.GstCtx.rip = uNewIp;
4305 break;
4306 }
4307
4308 case IEMMODE_32BIT:
4309 {
4310 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4311 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4312
4313 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4314 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4315 return iemRaiseGeneralProtectionFault0(pVCpu);
4316 pVCpu->cpum.GstCtx.rip = uNewEip;
4317 break;
4318 }
4319
4320 case IEMMODE_64BIT:
4321 {
4322 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4323
4324 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4325 if (!IEM_IS_CANONICAL(uNewRip))
4326 return iemRaiseGeneralProtectionFault0(pVCpu);
4327 pVCpu->cpum.GstCtx.rip = uNewRip;
4328 break;
4329 }
4330
4331 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4332 }
4333
4334 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4335
4336#ifndef IEM_WITH_CODE_TLB
4337 /* Flush the prefetch buffer. */
4338 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4339#endif
4340
4341 return VINF_SUCCESS;
4342}
4343
4344
4345/**
4346 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4347 *
4348 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4349 * segment limit.
4350 *
4351 * @returns Strict VBox status code.
4352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4353 * @param offNextInstr The offset of the next instruction.
4354 */
4355VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4356{
4357 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4358
4359 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4360 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4361 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4362 return iemRaiseGeneralProtectionFault0(pVCpu);
4363 /** @todo Test 16-bit jump in 64-bit mode. Possible? */
4364 pVCpu->cpum.GstCtx.rip = uNewIp;
4365 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4366
4367#ifndef IEM_WITH_CODE_TLB
4368 /* Flush the prefetch buffer. */
4369 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4370#endif
4371
4372 return VINF_SUCCESS;
4373}
4374
4375
4376/**
4377 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4378 *
4379 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4380 * segment limit.
4381 *
4382 * @returns Strict VBox status code.
4383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4384 * @param offNextInstr The offset of the next instruction.
4385 */
4386VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4387{
4388 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4389
4390 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4391 {
4392 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4393
4394 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4395 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4396 return iemRaiseGeneralProtectionFault0(pVCpu);
4397 pVCpu->cpum.GstCtx.rip = uNewEip;
4398 }
4399 else
4400 {
4401 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4402
4403 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4404 if (!IEM_IS_CANONICAL(uNewRip))
4405 return iemRaiseGeneralProtectionFault0(pVCpu);
4406 pVCpu->cpum.GstCtx.rip = uNewRip;
4407 }
4408 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4409
4410#ifndef IEM_WITH_CODE_TLB
4411 /* Flush the prefetch buffer. */
4412 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4413#endif
4414
4415 return VINF_SUCCESS;
4416}
4417
4418
4419/**
4420 * Performs a near jump to the specified address.
4421 *
4422 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4423 * segment limit.
4424 *
4425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4426 * @param uNewRip The new RIP value.
4427 */
4428VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4429{
4430 switch (pVCpu->iem.s.enmEffOpSize)
4431 {
4432 case IEMMODE_16BIT:
4433 {
4434 Assert(uNewRip <= UINT16_MAX);
4435 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4436 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4437 return iemRaiseGeneralProtectionFault0(pVCpu);
4438 /** @todo Test 16-bit jump in 64-bit mode. */
4439 pVCpu->cpum.GstCtx.rip = uNewRip;
4440 break;
4441 }
4442
4443 case IEMMODE_32BIT:
4444 {
4445 Assert(uNewRip <= UINT32_MAX);
4446 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4447 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4448
4449 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4450 return iemRaiseGeneralProtectionFault0(pVCpu);
4451 pVCpu->cpum.GstCtx.rip = uNewRip;
4452 break;
4453 }
4454
4455 case IEMMODE_64BIT:
4456 {
4457 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4458
4459 if (!IEM_IS_CANONICAL(uNewRip))
4460 return iemRaiseGeneralProtectionFault0(pVCpu);
4461 pVCpu->cpum.GstCtx.rip = uNewRip;
4462 break;
4463 }
4464
4465 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4466 }
4467
4468 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4469
4470#ifndef IEM_WITH_CODE_TLB
4471 /* Flush the prefetch buffer. */
4472 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4473#endif
4474
4475 return VINF_SUCCESS;
4476}
4477
4478/** @} */
4479
4480
4481/** @name FPU access and helpers.
4482 *
4483 * @{
4484 */
4485
4486/**
4487 * Updates the x87.DS and FPUDP registers.
4488 *
4489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4490 * @param pFpuCtx The FPU context.
4491 * @param iEffSeg The effective segment register.
4492 * @param GCPtrEff The effective address relative to @a iEffSeg.
4493 */
4494DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4495{
4496 RTSEL sel;
4497 switch (iEffSeg)
4498 {
4499 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4500 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4501 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4502 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4503 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4504 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4505 default:
4506 AssertMsgFailed(("%d\n", iEffSeg));
4507 sel = pVCpu->cpum.GstCtx.ds.Sel;
4508 }
4509    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4510 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4511 {
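        /* In real and V86 mode the data pointer is recorded as a linear address
           (selector * 16 + offset) and the DS selector field is left zero. */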
4512 pFpuCtx->DS = 0;
4513 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4514 }
4515 else if (!IEM_IS_LONG_MODE(pVCpu))
4516 {
4517 pFpuCtx->DS = sel;
4518 pFpuCtx->FPUDP = GCPtrEff;
4519 }
4520 else
4521 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4522}
4523
4524
4525/**
4526 * Rotates the stack registers in the push direction.
4527 *
4528 * @param pFpuCtx The FPU context.
4529 * @remarks This is a complete waste of time, but fxsave stores the registers in
4530 * stack order.
4531 */
4532DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4533{
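    /* aRegs[] is kept in ST(i) order: after a push (TOP decrement) each register
       moves one ST slot up, and the old ST(7) slot becomes the new ST(0) slot,
       which the caller then overwrites with the value being pushed. */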
4534 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4535 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4536 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4537 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4538 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4539 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4540 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4541 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4542 pFpuCtx->aRegs[0].r80 = r80Tmp;
4543}
4544
4545
4546/**
4547 * Rotates the stack registers in the pop direction.
4548 *
4549 * @param pFpuCtx The FPU context.
4550 * @remarks This is a complete waste of time, but fxsave stores the registers in
4551 * stack order.
4552 */
4553DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4554{
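    /* Inverse of the push rotation: after a pop (TOP increment) each register
       moves one ST slot down and the old ST(0) wraps around into ST(7). */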
4555 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4556 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4557 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4558 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4559 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4560 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4561 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4562 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4563 pFpuCtx->aRegs[7].r80 = r80Tmp;
4564}
4565
4566
4567/**
4568 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4569 * exception prevents it.
4570 *
4571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4572 * @param pResult The FPU operation result to push.
4573 * @param pFpuCtx The FPU context.
4574 */
4575static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4576{
4577 /* Update FSW and bail if there are pending exceptions afterwards. */
4578 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4579 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
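    /* Note: this test works because the IE/DE/ZE status bits occupy the same
       bit positions (0-2) in FSW as the IM/DM/ZM mask bits do in FCW. */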
4580 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4581 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4582 {
4583        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4584 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4585 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4586 pFpuCtx->FSW = fFsw;
4587 return;
4588 }
4589
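    /* Adding 7 is the same as subtracting 1 modulo 8, i.e. the push decrements TOP. */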
4590 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4591 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4592 {
4593 /* All is fine, push the actual value. */
4594 pFpuCtx->FTW |= RT_BIT(iNewTop);
4595 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4596 }
4597 else if (pFpuCtx->FCW & X86_FCW_IM)
4598 {
4599 /* Masked stack overflow, push QNaN. */
4600 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4601 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4602 }
4603 else
4604 {
4605 /* Raise stack overflow, don't push anything. */
4606 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4607 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4608 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4609 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4610 return;
4611 }
4612
4613 fFsw &= ~X86_FSW_TOP_MASK;
4614 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4615 pFpuCtx->FSW = fFsw;
4616
4617 iemFpuRotateStackPush(pFpuCtx);
4618 RT_NOREF(pVCpu);
4619}
4620
4621
4622/**
4623 * Stores a result in a FPU register and updates the FSW and FTW.
4624 *
4625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4626 * @param pFpuCtx The FPU context.
4627 * @param pResult The result to store.
4628 * @param iStReg Which FPU register to store it in.
4629 */
4630static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4631{
4632 Assert(iStReg < 8);
4633 uint16_t fNewFsw = pFpuCtx->FSW;
4634 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4635 fNewFsw &= ~X86_FSW_C_MASK;
4636 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4637 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4638 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4639 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4640 pFpuCtx->FSW = fNewFsw;
4641 pFpuCtx->FTW |= RT_BIT(iReg);
4642 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4643 RT_NOREF(pVCpu);
4644}
4645
4646
4647/**
4648 * Only updates the FPU status word (FSW) with the result of the current
4649 * instruction.
4650 *
4651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4652 * @param pFpuCtx The FPU context.
4653 * @param u16FSW The FSW output of the current instruction.
4654 */
4655static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4656{
4657 uint16_t fNewFsw = pFpuCtx->FSW;
4658 fNewFsw &= ~X86_FSW_C_MASK;
4659 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4660 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4661        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4662 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4663 pFpuCtx->FSW = fNewFsw;
4664 RT_NOREF(pVCpu);
4665}
4666
4667
4668/**
4669 * Pops one item off the FPU stack if no pending exception prevents it.
4670 *
4671 * @param pFpuCtx The FPU context.
4672 */
4673static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4674{
4675 /* Check pending exceptions. */
4676 uint16_t uFSW = pFpuCtx->FSW;
4677 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4678 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4679 return;
4680
4681    /* TOP++ (a pop increments TOP; adding 9 below is +1 modulo 8 in the TOP field). */
4682 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4683 uFSW &= ~X86_FSW_TOP_MASK;
4684 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4685 pFpuCtx->FSW = uFSW;
4686
4687 /* Mark the previous ST0 as empty. */
4688 iOldTop >>= X86_FSW_TOP_SHIFT;
4689 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4690
4691 /* Rotate the registers. */
4692 iemFpuRotateStackPop(pFpuCtx);
4693}
4694
4695
4696/**
4697 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4698 *
4699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4700 * @param pResult The FPU operation result to push.
4701 */
4702void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4703{
4704 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4705 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4706 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4707}
4708
4709
4710/**
4711 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4712 * and sets FPUDP and FPUDS.
4713 *
4714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4715 * @param pResult The FPU operation result to push.
4716 * @param iEffSeg The effective segment register.
4717 * @param GCPtrEff The effective address relative to @a iEffSeg.
4718 */
4719void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4720{
4721 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4722 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4723 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4724 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4725}
4726
4727
4728/**
4729 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4730 * unless a pending exception prevents it.
4731 *
4732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4733 * @param pResult The FPU operation result to store and push.
4734 */
4735void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4736{
4737 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4738 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4739
4740 /* Update FSW and bail if there are pending exceptions afterwards. */
4741 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4742 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4743 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4744 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4745 {
4746 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4747 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4748 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4749 pFpuCtx->FSW = fFsw;
4750 return;
4751 }
4752
4753 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4754 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4755 {
4756 /* All is fine, push the actual value. */
4757 pFpuCtx->FTW |= RT_BIT(iNewTop);
4758 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4759 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4760 }
4761 else if (pFpuCtx->FCW & X86_FCW_IM)
4762 {
4763 /* Masked stack overflow, push QNaN. */
4764 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4765 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4766 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4767 }
4768 else
4769 {
4770 /* Raise stack overflow, don't push anything. */
4771 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4772 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4773 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4774 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4775 return;
4776 }
4777
4778 fFsw &= ~X86_FSW_TOP_MASK;
4779 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4780 pFpuCtx->FSW = fFsw;
4781
4782 iemFpuRotateStackPush(pFpuCtx);
4783}
4784
4785
4786/**
4787 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4788 * FOP.
4789 *
4790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4791 * @param pResult The result to store.
4792 * @param iStReg Which FPU register to store it in.
4793 */
4794void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4795{
4796 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4797 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4798 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4799}
4800
4801
4802/**
4803 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4804 * FOP, and then pops the stack.
4805 *
4806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4807 * @param pResult The result to store.
4808 * @param iStReg Which FPU register to store it in.
4809 */
4810void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4811{
4812 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4813 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4814 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4815 iemFpuMaybePopOne(pFpuCtx);
4816}
4817
4818
4819/**
4820 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4821 * FPUDP, and FPUDS.
4822 *
4823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4824 * @param pResult The result to store.
4825 * @param iStReg Which FPU register to store it in.
4826 * @param iEffSeg The effective memory operand selector register.
4827 * @param GCPtrEff The effective memory operand offset.
4828 */
4829void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4830 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4831{
4832 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4833 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4834 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4835 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4836}
4837
4838
4839/**
4840 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4841 * FPUDP, and FPUDS, and then pops the stack.
4842 *
4843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4844 * @param pResult The result to store.
4845 * @param iStReg Which FPU register to store it in.
4846 * @param iEffSeg The effective memory operand selector register.
4847 * @param GCPtrEff The effective memory operand offset.
4848 */
4849void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4850 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4851{
4852 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4853 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4854 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4855 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4856 iemFpuMaybePopOne(pFpuCtx);
4857}
4858
4859
4860/**
4861 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4862 *
4863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4864 */
4865void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4866{
4867 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4868 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4869}
4870
4871
4872/**
4873 * Updates the FSW, FOP, FPUIP, and FPUCS.
4874 *
4875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4876 * @param u16FSW The FSW from the current instruction.
4877 */
4878void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4879{
4880 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4881 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4882 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4883}
4884
4885
4886/**
4887 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4888 *
4889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4890 * @param u16FSW The FSW from the current instruction.
4891 */
4892void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4893{
4894 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4895 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4896 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4897 iemFpuMaybePopOne(pFpuCtx);
4898}
4899
4900
4901/**
4902 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4903 *
4904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4905 * @param u16FSW The FSW from the current instruction.
4906 * @param iEffSeg The effective memory operand selector register.
4907 * @param GCPtrEff The effective memory operand offset.
4908 */
4909void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4910{
4911 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4912 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4913 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4914 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4915}
4916
4917
4918/**
4919 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4920 *
4921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4922 * @param u16FSW The FSW from the current instruction.
4923 */
4924void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4925{
4926 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4927 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4928 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4929 iemFpuMaybePopOne(pFpuCtx);
4930 iemFpuMaybePopOne(pFpuCtx);
4931}
4932
4933
4934/**
4935 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4936 *
4937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4938 * @param u16FSW The FSW from the current instruction.
4939 * @param iEffSeg The effective memory operand selector register.
4940 * @param GCPtrEff The effective memory operand offset.
4941 */
4942void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4943{
4944 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4945 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4946 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4947 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4948 iemFpuMaybePopOne(pFpuCtx);
4949}
4950
4951
4952/**
4953 * Worker routine for raising an FPU stack underflow exception.
4954 *
4955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4956 * @param pFpuCtx The FPU context.
4957 * @param iStReg The stack register being accessed.
4958 */
4959static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
4960{
4961 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4962 if (pFpuCtx->FCW & X86_FCW_IM)
4963 {
4964 /* Masked underflow. */
4965 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4966 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
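        /* Note that C1 is left clear here; a clear C1 together with SF signals
           stack underflow, whereas a set C1 would signal overflow. */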
4967 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4968 if (iStReg != UINT8_MAX)
4969 {
4970 pFpuCtx->FTW |= RT_BIT(iReg);
4971 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4972 }
4973 }
4974 else
4975 {
4976 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4977 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4978 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
4979 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4980 }
4981 RT_NOREF(pVCpu);
4982}
4983
4984
4985/**
4986 * Raises a FPU stack underflow exception.
4987 *
4988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4989 * @param iStReg The destination register that should be loaded
4990 * with QNaN if \#IS is not masked. Specify
4991 * UINT8_MAX if none (like for fcom).
4992 */
4993void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4994{
4995 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4996 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4997 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
4998}
4999
5000
5001void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5002{
5003 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5004 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5005 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5006 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5007}
5008
5009
5010void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5011{
5012 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5013 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5014 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5015 iemFpuMaybePopOne(pFpuCtx);
5016}
5017
5018
5019void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5020{
5021 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5022 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5023 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5024 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5025 iemFpuMaybePopOne(pFpuCtx);
5026}
5027
5028
5029void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5030{
5031 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5032 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5033 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5034 iemFpuMaybePopOne(pFpuCtx);
5035 iemFpuMaybePopOne(pFpuCtx);
5036}
5037
5038
5039void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5040{
5041 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5042 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5043
5044 if (pFpuCtx->FCW & X86_FCW_IM)
5045 {
5046        /* Masked stack underflow - Push QNaN. */
5047 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5048 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5049 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5050 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5051 pFpuCtx->FTW |= RT_BIT(iNewTop);
5052 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5053 iemFpuRotateStackPush(pFpuCtx);
5054 }
5055 else
5056 {
5057 /* Exception pending - don't change TOP or the register stack. */
5058 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5059 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5060 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5061 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5062 }
5063}
5064
5065
5066void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5067{
5068 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5069 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5070
5071 if (pFpuCtx->FCW & X86_FCW_IM)
5072 {
5073        /* Masked stack underflow - Push QNaN. */
5074 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5075 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5076 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5077 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5078 pFpuCtx->FTW |= RT_BIT(iNewTop);
5079 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5080 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5081 iemFpuRotateStackPush(pFpuCtx);
5082 }
5083 else
5084 {
5085 /* Exception pending - don't change TOP or the register stack. */
5086 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5087 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5088 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5089 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5090 }
5091}
5092
5093
5094/**
5095 * Worker routine for raising an FPU stack overflow exception on a push.
5096 *
5097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5098 * @param pFpuCtx The FPU context.
5099 */
5100static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5101{
5102 if (pFpuCtx->FCW & X86_FCW_IM)
5103 {
5104 /* Masked overflow. */
5105 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5106 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5107 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5108 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5109 pFpuCtx->FTW |= RT_BIT(iNewTop);
5110 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5111 iemFpuRotateStackPush(pFpuCtx);
5112 }
5113 else
5114 {
5115 /* Exception pending - don't change TOP or the register stack. */
5116 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5117 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5118 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5119 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5120 }
5121 RT_NOREF(pVCpu);
5122}
5123
5124
5125/**
5126 * Raises a FPU stack overflow exception on a push.
5127 *
5128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5129 */
5130void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5131{
5132 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5133 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5134 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5135}
5136
5137
5138/**
5139 * Raises a FPU stack overflow exception on a push with a memory operand.
5140 *
5141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5142 * @param iEffSeg The effective memory operand selector register.
5143 * @param GCPtrEff The effective memory operand offset.
5144 */
5145void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5146{
5147 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5148 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5149 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5150 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5151}
5152
5153/** @} */
5154
5155
5156/** @name SSE+AVX SIMD access and helpers.
5157 *
5158 * @{
5159 */
5160/**
5161 * Stores a result in a SIMD XMM register, updates the MXCSR.
5162 *
5163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5164 * @param pResult The result to store.
5165 * @param iXmmReg Which SIMD XMM register to store the result in.
5166 */
5167void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5168{
5169 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5170 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5171
5172 /* The result is only updated if there is no unmasked exception pending. */
5173    if ((  ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5174         &  (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5175 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5176}
5177
5178
5179/**
5180 * Updates the MXCSR.
5181 *
5182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5183 * @param fMxcsr The new MXCSR value.
5184 */
5185void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5186{
5187 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5188 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5189}
5190/** @} */
5191
5192
5193/** @name Memory access.
5194 *
5195 * @{
5196 */
5197
5198
5199/**
5200 * Updates the IEMCPU::cbWritten counter if applicable.
5201 *
5202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5203 * @param fAccess The access being accounted for.
5204 * @param cbMem The access size.
5205 */
5206DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5207{
5208 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5209 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5210 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5211}
5212
5213
5214/**
5215 * Applies the segment limit, base and attributes.
5216 *
5217 * This may raise a \#GP or \#SS.
5218 *
5219 * @returns VBox strict status code.
5220 *
5221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5222 * @param fAccess The kind of access which is being performed.
5223 * @param iSegReg The index of the segment register to apply.
5224 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5225 * TSS, ++).
5226 * @param cbMem The access size.
5227 * @param pGCPtrMem Pointer to the guest memory address to apply
5228 * segmentation to. Input and output parameter.
5229 */
5230VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5231{
5232 if (iSegReg == UINT8_MAX)
5233 return VINF_SUCCESS;
5234
5235 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5236 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5237 switch (pVCpu->iem.s.enmCpuMode)
5238 {
5239 case IEMMODE_16BIT:
5240 case IEMMODE_32BIT:
5241 {
5242 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5243 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5244
5245 if ( pSel->Attr.n.u1Present
5246 && !pSel->Attr.n.u1Unusable)
5247 {
5248 Assert(pSel->Attr.n.u1DescType);
5249 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5250 {
5251 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5252 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5253 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5254
5255 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5256 {
5257 /** @todo CPL check. */
5258 }
5259
5260 /*
5261 * There are two kinds of data selectors, normal and expand down.
5262 */
5263 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5264 {
5265 if ( GCPtrFirst32 > pSel->u32Limit
5266 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5267 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5268 }
5269 else
5270 {
5271 /*
5272 * The upper boundary is defined by the B bit, not the G bit!
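                         * For an expand-down data segment the valid offset range is
                         * (limit + 1) up to 0xffffffff when B=1 or up to 0xffff when
                         * B=0; e.g. a limit of 0x0fff with B=1 gives 0x1000..0xffffffff.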
5273 */
5274 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5275 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5276 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5277 }
5278 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5279 }
5280 else
5281 {
5282 /*
5283                 * Code selectors can usually be read through; writing is
5284                 * only permitted in real and V8086 mode.
5285 */
5286 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5287 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5288 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5289 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5290 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5291
5292 if ( GCPtrFirst32 > pSel->u32Limit
5293 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5294 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5295
5296 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5297 {
5298 /** @todo CPL check. */
5299 }
5300
5301 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5302 }
5303 }
5304 else
5305 return iemRaiseGeneralProtectionFault0(pVCpu);
5306 return VINF_SUCCESS;
5307 }
5308
5309 case IEMMODE_64BIT:
5310 {
5311 RTGCPTR GCPtrMem = *pGCPtrMem;
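            /* In 64-bit mode only the FS and GS segment bases are applied;
               CS, DS, ES and SS are treated as having a zero base. */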
5312 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5313 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5314
5315 Assert(cbMem >= 1);
5316 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5317 return VINF_SUCCESS;
5318 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5319 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5320 return iemRaiseGeneralProtectionFault0(pVCpu);
5321 }
5322
5323 default:
5324 AssertFailedReturn(VERR_IEM_IPE_7);
5325 }
5326}
5327
5328
5329/**
5330 * Translates a virtual address to a physical address and checks if we
5331 * can access the page as specified.
5332 *
5333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5334 * @param GCPtrMem The virtual address.
5335 * @param fAccess The intended access.
5336 * @param pGCPhysMem Where to return the physical address.
5337 */
5338VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5339{
5340 /** @todo Need a different PGM interface here. We're currently using
5341 * generic / REM interfaces. this won't cut it for R0. */
5342 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5343 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5344 * here. */
5345 PGMPTWALK Walk;
5346 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5347 if (RT_FAILURE(rc))
5348 {
5349 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5350 /** @todo Check unassigned memory in unpaged mode. */
5351 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5352#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5353 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5354 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5355#endif
5356 *pGCPhysMem = NIL_RTGCPHYS;
5357 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5358 }
5359
5360 /* If the page is writable and does not have the no-exec bit set, all
5361 access is allowed. Otherwise we'll have to check more carefully... */
5362 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5363 {
5364 /* Write to read only memory? */
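        /* CPL 3 (non-system) writes always fault on read-only pages, while
           supervisor writes only fault when CR0.WP is set. */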
5365 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5366 && !(Walk.fEffective & X86_PTE_RW)
5367 && ( ( pVCpu->iem.s.uCpl == 3
5368 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5369 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5370 {
5371 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5372 *pGCPhysMem = NIL_RTGCPHYS;
5373#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5374 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5375 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5376#endif
5377 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5378 }
5379
5380 /* Kernel memory accessed by userland? */
5381 if ( !(Walk.fEffective & X86_PTE_US)
5382 && pVCpu->iem.s.uCpl == 3
5383 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5384 {
5385 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5386 *pGCPhysMem = NIL_RTGCPHYS;
5387#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5388 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5389 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5390#endif
5391 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5392 }
5393
5394 /* Executing non-executable memory? */
5395 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5396 && (Walk.fEffective & X86_PTE_PAE_NX)
5397 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5398 {
5399 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5400 *pGCPhysMem = NIL_RTGCPHYS;
5401#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5402 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5403 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5404#endif
5405 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5406 VERR_ACCESS_DENIED);
5407 }
5408 }
5409
5410 /*
5411 * Set the dirty / access flags.
5412     * ASSUMES this is set when the address is translated rather than on commit...
5413 */
5414 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5415 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5416 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5417 {
5418 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5419 AssertRC(rc2);
5420 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5421 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5422 }
5423
5424 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5425 *pGCPhysMem = GCPhys;
5426 return VINF_SUCCESS;
5427}
5428
5429
5430/**
5431 * Looks up a memory mapping entry.
5432 *
5433 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5435 * @param pvMem The memory address.
5436 * @param   fAccess             The access (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX) to match.
5437 */
5438DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5439{
5440 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5441 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5442 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5443 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5444 return 0;
5445 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5446 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5447 return 1;
5448 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5449 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5450 return 2;
5451 return VERR_NOT_FOUND;
5452}
5453
5454
5455/**
5456 * Finds a free memmap entry when using iNextMapping doesn't work.
5457 *
5458 * @returns Memory mapping index, 1024 on failure.
5459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5460 */
5461static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5462{
5463 /*
5464 * The easy case.
5465 */
5466 if (pVCpu->iem.s.cActiveMappings == 0)
5467 {
5468 pVCpu->iem.s.iNextMapping = 1;
5469 return 0;
5470 }
5471
5472 /* There should be enough mappings for all instructions. */
5473 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5474
5475 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5476 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5477 return i;
5478
5479 AssertFailedReturn(1024);
5480}
5481
5482
5483/**
5484 * Commits a bounce buffer that needs writing back and unmaps it.
5485 *
5486 * @returns Strict VBox status code.
5487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5488 * @param iMemMap The index of the buffer to commit.
5489 * @param   fPostponeFail       Whether we can postpone write failures to ring-3.
5490 * Always false in ring-3, obviously.
5491 */
5492static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5493{
5494 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5495 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5496#ifdef IN_RING3
5497 Assert(!fPostponeFail);
5498 RT_NOREF_PV(fPostponeFail);
5499#endif
5500
5501 /*
5502 * Do the writing.
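     * A bounce buffer may cover two separate physical ranges (cbFirst bytes at
     * GCPhysFirst and cbSecond bytes at GCPhysSecond when the access crossed a
     * page boundary); each part is written separately and any informational
     * status codes are merged.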
5503 */
5504 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5505 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5506 {
5507 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5508 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5509 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5510 if (!pVCpu->iem.s.fBypassHandlers)
5511 {
5512 /*
5513 * Carefully and efficiently dealing with access handler return
5514 * codes make this a little bloated.
5515             * codes makes this a little bloated.
5516 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5517 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5518 pbBuf,
5519 cbFirst,
5520 PGMACCESSORIGIN_IEM);
5521 if (rcStrict == VINF_SUCCESS)
5522 {
5523 if (cbSecond)
5524 {
5525 rcStrict = PGMPhysWrite(pVM,
5526 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5527 pbBuf + cbFirst,
5528 cbSecond,
5529 PGMACCESSORIGIN_IEM);
5530 if (rcStrict == VINF_SUCCESS)
5531 { /* nothing */ }
5532 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5533 {
5534 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5535 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5536 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5537 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5538 }
5539#ifndef IN_RING3
5540 else if (fPostponeFail)
5541 {
5542 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5543 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5544 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5545 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5546 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5547 return iemSetPassUpStatus(pVCpu, rcStrict);
5548 }
5549#endif
5550 else
5551 {
5552 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5553 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5554 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5555 return rcStrict;
5556 }
5557 }
5558 }
5559 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5560 {
5561 if (!cbSecond)
5562 {
5563 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5564 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5565 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5566 }
5567 else
5568 {
5569 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5570 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5571 pbBuf + cbFirst,
5572 cbSecond,
5573 PGMACCESSORIGIN_IEM);
5574 if (rcStrict2 == VINF_SUCCESS)
5575 {
5576 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5577 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5578 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5579 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5580 }
5581 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5582 {
5583 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5584 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5585 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5586 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5587 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5588 }
5589#ifndef IN_RING3
5590 else if (fPostponeFail)
5591 {
5592 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5593 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5594 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5595 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5596 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5597 return iemSetPassUpStatus(pVCpu, rcStrict);
5598 }
5599#endif
5600 else
5601 {
5602 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5603 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5604 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5605 return rcStrict2;
5606 }
5607 }
5608 }
5609#ifndef IN_RING3
5610 else if (fPostponeFail)
5611 {
5612 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5613 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5614 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5615 if (!cbSecond)
5616 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5617 else
5618 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5619 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5620 return iemSetPassUpStatus(pVCpu, rcStrict);
5621 }
5622#endif
5623 else
5624 {
5625 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5626 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5627 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5628 return rcStrict;
5629 }
5630 }
5631 else
5632 {
5633 /*
5634 * No access handlers, much simpler.
5635 */
5636 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5637 if (RT_SUCCESS(rc))
5638 {
5639 if (cbSecond)
5640 {
5641 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5642 if (RT_SUCCESS(rc))
5643 { /* likely */ }
5644 else
5645 {
5646 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5647 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5648 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5649 return rc;
5650 }
5651 }
5652 }
5653 else
5654 {
5655 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5656 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5657 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5658 return rc;
5659 }
5660 }
5661 }
5662
5663#if defined(IEM_LOG_MEMORY_WRITES)
5664 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5665 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5666 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5667 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5668 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5669 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5670
5671 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5672 g_cbIemWrote = cbWrote;
5673 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5674#endif
5675
5676 /*
5677 * Free the mapping entry.
5678 */
5679 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5680 Assert(pVCpu->iem.s.cActiveMappings != 0);
5681 pVCpu->iem.s.cActiveMappings--;
5682 return VINF_SUCCESS;
5683}
5684
5685
5686/**
5687 * iemMemMap worker that deals with a request crossing pages.
5688 */
5689static VBOXSTRICTRC
5690iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5691{
5692 /*
5693 * Do the address translations.
5694 */
5695 RTGCPHYS GCPhysFirst;
5696 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5697 if (rcStrict != VINF_SUCCESS)
5698 return rcStrict;
5699
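    /* The second page is found by rounding the address of the last accessed byte
       down to its page boundary and translating that; the physical result is
       masked down to the page start as well. */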
5700 RTGCPHYS GCPhysSecond;
5701 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5702 fAccess, &GCPhysSecond);
5703 if (rcStrict != VINF_SUCCESS)
5704 return rcStrict;
5705 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5706
5707 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5708
5709 /*
5710 * Read in the current memory content if it's a read, execute or partial
5711 * write access.
5712 */
5713 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5714 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5715 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5716
5717 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5718 {
5719 if (!pVCpu->iem.s.fBypassHandlers)
5720 {
5721 /*
5722 * Must carefully deal with access handler status codes here,
5723 * makes the code a bit bloated.
5724 */
5725 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5726 if (rcStrict == VINF_SUCCESS)
5727 {
5728 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5729 if (rcStrict == VINF_SUCCESS)
5730 { /*likely */ }
5731 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5732 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5733 else
5734 {
5735 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5736 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5737 return rcStrict;
5738 }
5739 }
5740 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5741 {
5742 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5743 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5744 {
5745 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5746 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5747 }
5748 else
5749 {
5750 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5751                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5752 return rcStrict2;
5753 }
5754 }
5755 else
5756 {
5757 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5758 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5759 return rcStrict;
5760 }
5761 }
5762 else
5763 {
5764 /*
5765             * No informational status codes here, much more straightforward.
5766 */
5767 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5768 if (RT_SUCCESS(rc))
5769 {
5770 Assert(rc == VINF_SUCCESS);
5771 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5772 if (RT_SUCCESS(rc))
5773 Assert(rc == VINF_SUCCESS);
5774 else
5775 {
5776 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5777 return rc;
5778 }
5779 }
5780 else
5781 {
5782 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5783 return rc;
5784 }
5785 }
5786 }
5787#ifdef VBOX_STRICT
5788 else
5789 memset(pbBuf, 0xcc, cbMem);
5790 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5791 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5792#endif
5793 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
5794
5795 /*
5796 * Commit the bounce buffer entry.
5797 */
5798 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5799 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5800 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5801 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5802 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5803 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5804 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5805 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5806 pVCpu->iem.s.cActiveMappings++;
5807
5808 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5809 *ppvMem = pbBuf;
5810 return VINF_SUCCESS;
5811}
5812
5813
5814/**
5815 * iemMemMap worker that deals with iemMemPageMap failures.
5816 */
5817static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5818 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5819{
5820 /*
5821 * Filter out conditions we can handle and the ones which shouldn't happen.
5822 */
5823 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5824 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5825 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5826 {
5827 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5828 return rcMap;
5829 }
5830 pVCpu->iem.s.cPotentialExits++;
5831
5832 /*
5833 * Read in the current memory content if it's a read, execute or partial
5834 * write access.
5835 */
5836 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5837 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5838 {
5839 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5840 memset(pbBuf, 0xff, cbMem);
5841 else
5842 {
5843 int rc;
5844 if (!pVCpu->iem.s.fBypassHandlers)
5845 {
5846 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5847 if (rcStrict == VINF_SUCCESS)
5848 { /* nothing */ }
5849 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5850 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5851 else
5852 {
5853 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5854 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5855 return rcStrict;
5856 }
5857 }
5858 else
5859 {
5860 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5861 if (RT_SUCCESS(rc))
5862 { /* likely */ }
5863 else
5864 {
5865 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5866 GCPhysFirst, rc));
5867 return rc;
5868 }
5869 }
5870 }
5871 }
5872#ifdef VBOX_STRICT
5873 else
5874 memset(pbBuf, 0xcc, cbMem);
5875#endif
5876#ifdef VBOX_STRICT
5877 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5878 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5879#endif
5880
5881 /*
5882 * Commit the bounce buffer entry.
5883 */
5884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5885 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5886 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5887 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5888 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5889 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5890 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5891 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5892 pVCpu->iem.s.cActiveMappings++;
5893
5894 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5895 *ppvMem = pbBuf;
5896 return VINF_SUCCESS;
5897}
5898
5899
5900
5901/**
5902 * Maps the specified guest memory for the given kind of access.
5903 *
5904 * This may be using bounce buffering of the memory if it's crossing a page
5905 * boundary or if there is an access handler installed for any of it. Because
5906 * of lock prefix guarantees, we're in for some extra clutter when this
5907 * happens.
5908 *
5909 * This may raise a \#GP, \#SS, \#PF or \#AC.
5910 *
5911 * @returns VBox strict status code.
5912 *
5913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5914 * @param ppvMem Where to return the pointer to the mapped memory.
5915 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
5916 * 8, 12, 16, 32 or 512. When used by string operations
5917 * it can be up to a page.
5918 * @param iSegReg The index of the segment register to use for this
5919 * access. The base and limits are checked. Use UINT8_MAX
5920 * to indicate that no segmentation is required (for IDT,
5921 * GDT and LDT accesses).
5922 * @param GCPtrMem The address of the guest memory.
5923 * @param fAccess How the memory is being accessed. The
5924 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
5925 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
5926 * when raising exceptions.
5927 * @param uAlignCtl Alignment control:
5928 * - Bits 15:0 is the alignment mask.
5929 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
5930 * IEM_MEMMAP_F_ALIGN_SSE, and
5931 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
5932 * Pass zero to skip alignment.
5933 */
5934VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5935 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
5936{
5937 /*
5938 * Check the input and figure out which mapping entry to use.
5939 */
5940 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
5941 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
5942 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
5943 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5944 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5945
5946 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5947 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5948 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5949 {
5950 iMemMap = iemMemMapFindFree(pVCpu);
5951 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5952 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5953 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5954 pVCpu->iem.s.aMemMappings[2].fAccess),
5955 VERR_IEM_IPE_9);
5956 }
5957
5958 /*
5959 * Map the memory, checking that we can actually access it. If something
5960 * slightly complicated happens, fall back on bounce buffering.
5961 */
5962 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5963 if (rcStrict == VINF_SUCCESS)
5964 { /* likely */ }
5965 else
5966 return rcStrict;
5967
5968 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5969 { /* likely */ }
5970 else
5971 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5972
5973 /*
5974 * Alignment check.
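     * The low 16 bits of uAlignCtl is the mask of address bits that must be zero
     * (e.g. 15 for a 16 byte aligned access), while the IEM_MEMMAP_F_ALIGN_XXX
     * flags in the upper bits select whether a misaligned access raises #GP(0),
     * #AC, or is let through when alignment checks are disabled.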
5975 */
5976 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
5977 { /* likelyish */ }
5978 else
5979 {
5980 /* Misaligned access. */
5981 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
5982 {
5983 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
5984 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
5985 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
5986 {
5987 AssertCompile(X86_CR0_AM == X86_EFL_AC);
5988
5989 if (iemMemAreAlignmentChecksEnabled(pVCpu))
5990 return iemRaiseAlignmentCheckException(pVCpu);
5991 }
5992 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
5993 && iemMemAreAlignmentChecksEnabled(pVCpu)
5994/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
5995 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
5996 )
5997 return iemRaiseAlignmentCheckException(pVCpu);
5998 else
5999 return iemRaiseGeneralProtectionFault0(pVCpu);
6000 }
6001 }
6002
6003#ifdef IEM_WITH_DATA_TLB
6004 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6005
6006 /*
6007 * Get the TLB entry for this page.
6008 */
6009 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6010 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6011 if (pTlbe->uTag == uTag)
6012 {
6013# ifdef VBOX_WITH_STATISTICS
6014 pVCpu->iem.s.DataTlb.cTlbHits++;
6015# endif
6016 }
6017 else
6018 {
6019 pVCpu->iem.s.DataTlb.cTlbMisses++;
6020 PGMPTWALK Walk;
6021 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6022 if (RT_FAILURE(rc))
6023 {
6024 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6025# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6026 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6027 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6028# endif
6029 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6030 }
6031
6032 Assert(Walk.fSucceeded);
6033 pTlbe->uTag = uTag;
6034 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6035 pTlbe->GCPhys = Walk.GCPhys;
6036 pTlbe->pbMappingR3 = NULL;
6037 }
6038
6039 /*
6040 * Check TLB page table level access flags.
6041 */
6042 /* If the page is either supervisor only or non-writable, we need to do
6043 more careful access checks. */
6044 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6045 {
6046 /* Write to read only memory? */
6047 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6048 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6049 && ( ( pVCpu->iem.s.uCpl == 3
6050 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6051 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6052 {
6053 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6054# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6055 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6056 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6057# endif
6058 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6059 }
6060
6061 /* Kernel memory accessed by userland? */
6062 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6063 && pVCpu->iem.s.uCpl == 3
6064 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6065 {
6066 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6067# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6068 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6069 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6070# endif
6071 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6072 }
6073 }
6074
6075 /*
6076 * Set the dirty / access flags.
6077 * ASSUMES this is set when the address is translated rather than on commit...
6078 */
6079 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6080 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6081 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6082 {
6083 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6084 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6085 AssertRC(rc2);
6086 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6087 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6088 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6089 }
6090
6091 /*
6092 * Look up the physical page info if necessary.
6093 */
6094 uint8_t *pbMem = NULL;
6095 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6096# ifdef IN_RING3
6097 pbMem = pTlbe->pbMappingR3;
6098# else
6099 pbMem = NULL;
6100# endif
6101 else
6102 {
6103 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6104 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6105 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6106 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6107 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6108 { /* likely */ }
6109 else
6110 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6111 pTlbe->pbMappingR3 = NULL;
6112 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6113 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6114 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6115 &pbMem, &pTlbe->fFlagsAndPhysRev);
6116 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6117# ifdef IN_RING3
6118 pTlbe->pbMappingR3 = pbMem;
6119# endif
6120 }
6121
6122 /*
6123 * Check the physical page level access and mapping.
6124 */
6125 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6126 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6127 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6128 { /* probably likely */ }
6129 else
6130 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6131 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6132 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6133 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6134 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6135 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6136
6137 if (pbMem)
6138 {
6139 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6140 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6141 fAccess |= IEM_ACCESS_NOT_LOCKED;
6142 }
6143 else
6144 {
6145 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6146 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6147 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6148 if (rcStrict != VINF_SUCCESS)
6149 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6150 }
6151
6152 void * const pvMem = pbMem;
6153
6154 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6155 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6156 if (fAccess & IEM_ACCESS_TYPE_READ)
6157 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6158
6159#else /* !IEM_WITH_DATA_TLB */
6160
6161 RTGCPHYS GCPhysFirst;
6162 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6163 if (rcStrict != VINF_SUCCESS)
6164 return rcStrict;
6165
6166 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6167 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6168 if (fAccess & IEM_ACCESS_TYPE_READ)
6169 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6170
6171 void *pvMem;
6172 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6173 if (rcStrict != VINF_SUCCESS)
6174 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6175
6176#endif /* !IEM_WITH_DATA_TLB */
6177
6178 /*
6179 * Fill in the mapping table entry.
6180 */
6181 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6182 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6183 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6184 pVCpu->iem.s.cActiveMappings += 1;
6185
6186 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6187 *ppvMem = pvMem;
6188
6189 return VINF_SUCCESS;
6190}
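
/*
 * Illustrative sketch (not part of the build): the two cheap pre-checks
 * iemMemMap performs before touching the TLB, spelled out with plain
 * integers.  A 4 KiB guest page size is assumed here for the sake of the
 * example; the real code uses the GUEST_PAGE_OFFSET_MASK / GUEST_PAGE_SIZE
 * macros, and the helper names below are made up.
 */
#if 0 /* example only */
static bool iemExampleCrossesPage(uint64_t GCPtrMem, size_t cbMem)
{
    uint64_t const cbPage  = 4096;                      /* assumption: 4 KiB guest pages */
    uint64_t const offPage = GCPtrMem & (cbPage - 1);   /* offset into the page */
    return offPage + cbMem > cbPage;                    /* true -> bounce buffering is needed */
}

static bool iemExampleIsMisaligned(uint64_t GCPtrMem, uint32_t uAlignCtl)
{
    uint32_t const fAlignMask = uAlignCtl & UINT16_MAX; /* bits 15:0 carry the alignment mask */
    return (GCPtrMem & fAlignMask) != 0;                /* non-zero -> the #GP/#AC handling above kicks in */
}
#endif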
6191
6192
6193/**
6194 * Commits the guest memory if bounce buffered and unmaps it.
6195 *
6196 * @returns Strict VBox status code.
6197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6198 * @param pvMem The mapping.
6199 * @param fAccess The kind of access.
6200 */
6201VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6202{
6203 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6204 AssertReturn(iMemMap >= 0, iMemMap);
6205
6206 /* If it's bounce buffered, we may need to write back the buffer. */
6207 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6208 {
6209 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6210 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6211 }
6212 /* Otherwise unlock it. */
6213 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6214 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6215
6216 /* Free the entry. */
6217 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6218 Assert(pVCpu->iem.s.cActiveMappings != 0);
6219 pVCpu->iem.s.cActiveMappings--;
6220 return VINF_SUCCESS;
6221}
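
/*
 * Illustrative sketch (not part of the build): the usual caller pattern for
 * iemMemMap / iemMemCommitAndUnmap - map, access the returned pointer, then
 * commit-and-unmap with the same fAccess value.  The helper name is made up
 * for the example; the real fetch/store helpers further down in this file
 * follow exactly this shape.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleStoreDword(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint32_t *pu32Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1 /* natural alignment mask */);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;                                          /* write through the mapping */
        rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W); /* commit bounce buffer / release page lock */
    }
    return rc;
}
#endif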
6222
6223#ifdef IEM_WITH_SETJMP
6224
6225/**
6226 * Maps the specified guest memory for the given kind of access, longjmp on
6227 * error.
6228 *
6229 * This may be using bounce buffering of the memory if it's crossing a page
6230 * boundary or if there is an access handler installed for any of it. Because
6231 * of lock prefix guarantees, we're in for some extra clutter when this
6232 * happens.
6233 *
6234 * This may raise a \#GP, \#SS, \#PF or \#AC.
6235 *
6236 * @returns Pointer to the mapped memory.
6237 *
6238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6239 * @param cbMem The number of bytes to map. This is usually 1,
6240 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6241 * string operations it can be up to a page.
6242 * @param iSegReg The index of the segment register to use for
6243 * this access. The base and limits are checked.
6244 * Use UINT8_MAX to indicate that no segmentation
6245 * is required (for IDT, GDT and LDT accesses).
6246 * @param GCPtrMem The address of the guest memory.
6247 * @param fAccess How the memory is being accessed. The
6248 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6249 * how to map the memory, while the
6250 * IEM_ACCESS_WHAT_XXX bit is used when raising
6251 * exceptions.
6252 * @param uAlignCtl Alignment control:
6253 * - Bits 15:0 is the alignment mask.
6254 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6255 * IEM_MEMMAP_F_ALIGN_SSE, and
6256 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6257 * Pass zero to skip alignment.
6258 */
6259void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6260 uint32_t uAlignCtl) RT_NOEXCEPT
6261{
6262 /*
6263 * Check the input, check segment access and adjust address
6264 * with segment base.
6265 */
6266 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6267 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6268 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6269
6270 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6271 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6272 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6273
6274 /*
6275 * Alignment check.
6276 */
6277 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6278 { /* likelyish */ }
6279 else
6280 {
6281 /* Misaligned access. */
6282 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6283 {
6284 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6285 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6286 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6287 {
6288 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6289
6290 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6291 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6292 }
6293 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6294 && iemMemAreAlignmentChecksEnabled(pVCpu)
6295/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6296 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6297 )
6298 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6299 else
6300 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6301 }
6302 }
6303
6304 /*
6305 * Figure out which mapping entry to use.
6306 */
6307 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6308 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6309 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6310 {
6311 iMemMap = iemMemMapFindFree(pVCpu);
6312 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6313 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6314 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6315 pVCpu->iem.s.aMemMappings[2].fAccess),
6316 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6317 }
6318
6319 /*
6320 * Crossing a page boundary?
6321 */
6322 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6323 { /* No (likely). */ }
6324 else
6325 {
6326 void *pvMem;
6327 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6328 if (rcStrict == VINF_SUCCESS)
6329 return pvMem;
6330 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6331 }
6332
6333#ifdef IEM_WITH_DATA_TLB
6334 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6335
6336 /*
6337 * Get the TLB entry for this page.
6338 */
6339 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6340 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6341 if (pTlbe->uTag == uTag)
6342 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6343 else
6344 {
6345 pVCpu->iem.s.DataTlb.cTlbMisses++;
6346 PGMPTWALK Walk;
6347 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6348 if (RT_FAILURE(rc))
6349 {
6350 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6351# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6352 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6353 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6354# endif
6355 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6356 }
6357
6358 Assert(Walk.fSucceeded);
6359 pTlbe->uTag = uTag;
6360 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6361 pTlbe->GCPhys = Walk.GCPhys;
6362 pTlbe->pbMappingR3 = NULL;
6363 }
6364
6365 /*
6366 * Check the flags and physical revision.
6367 */
6368 /** @todo make the caller pass these in with fAccess. */
6369 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6370 ? IEMTLBE_F_PT_NO_USER : 0;
6371 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6372 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6373 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6374 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6375 ? IEMTLBE_F_PT_NO_WRITE : 0)
6376 : 0;
6377 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6378 uint8_t *pbMem = NULL;
6379 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6380 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6381# ifdef IN_RING3
6382 pbMem = pTlbe->pbMappingR3;
6383# else
6384 pbMem = NULL;
6385# endif
6386 else
6387 {
6388 /*
6389 * Okay, something isn't quite right or needs refreshing.
6390 */
6391 /* Write to read only memory? */
6392 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6393 {
6394 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6395# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6396 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6397 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6398# endif
6399 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6400 }
6401
6402 /* Kernel memory accessed by userland? */
6403 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6404 {
6405 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6406# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6407 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6408 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6409# endif
6410 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6411 }
6412
6413 /* Set the dirty / access flags.
6414 ASSUMES this is set when the address is translated rather than on commit... */
6415 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6416 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6417 {
6418 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6419 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6420 AssertRC(rc2);
6421 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6422 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6423 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6424 }
6425
6426 /*
6427 * Check if the physical page info needs updating.
6428 */
6429 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6430# ifdef IN_RING3
6431 pbMem = pTlbe->pbMappingR3;
6432# else
6433 pbMem = NULL;
6434# endif
6435 else
6436 {
6437 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6438 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6439 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6440 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6441 pTlbe->pbMappingR3 = NULL;
6442 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6443 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6444 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6445 &pbMem, &pTlbe->fFlagsAndPhysRev);
6446 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6447# ifdef IN_RING3
6448 pTlbe->pbMappingR3 = pbMem;
6449# endif
6450 }
6451
6452 /*
6453 * Check the physical page level access and mapping.
6454 */
6455 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6456 { /* probably likely */ }
6457 else
6458 {
6459 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6460 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6461 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6462 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6463 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6464 if (rcStrict == VINF_SUCCESS)
6465 return pbMem;
6466 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6467 }
6468 }
6469 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6470
6471 if (pbMem)
6472 {
6473 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6474 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6475 fAccess |= IEM_ACCESS_NOT_LOCKED;
6476 }
6477 else
6478 {
6479 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6480 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6481 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6482 if (rcStrict == VINF_SUCCESS)
6483 return pbMem;
6484 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6485 }
6486
6487 void * const pvMem = pbMem;
6488
6489 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6490 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6491 if (fAccess & IEM_ACCESS_TYPE_READ)
6492 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6493
6494#else /* !IEM_WITH_DATA_TLB */
6495
6496
6497 RTGCPHYS GCPhysFirst;
6498 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6499 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6500 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6501
6502 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6503 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6504 if (fAccess & IEM_ACCESS_TYPE_READ)
6505 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6506
6507 void *pvMem;
6508 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6509 if (rcStrict == VINF_SUCCESS)
6510 { /* likely */ }
6511 else
6512 {
6513 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6514 if (rcStrict == VINF_SUCCESS)
6515 return pvMem;
6516 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6517 }
6518
6519#endif /* !IEM_WITH_DATA_TLB */
6520
6521 /*
6522 * Fill in the mapping table entry.
6523 */
6524 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6525 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6526 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6527 pVCpu->iem.s.cActiveMappings++;
6528
6529 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6530 return pvMem;
6531}
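
/*
 * Illustrative sketch (not part of the build): how the fNoWriteNoDirty
 * requirement above boils down for the page table R/W bit.  A write to a
 * read-only PTE only faults for a CPL=3 non-system access, or for any access
 * when CR0.WP is set.  Plain bools stand in for the fAccess and CR0 tests.
 */
#if 0 /* example only */
static bool iemExampleWriteFaultsOnRoPage(bool fPteReadOnly, unsigned uCpl, bool fSysAccess, bool fCr0Wp)
{
    if (!fPteReadOnly)
        return false;               /* page is writable, nothing to check */
    if (uCpl == 3 && !fSysAccess)
        return true;                /* user-mode data writes always honour the R/O bit */
    return fCr0Wp;                  /* supervisor writes only fault when CR0.WP=1 */
}
#endif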
6532
6533
6534/**
6535 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6536 *
6537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6538 * @param pvMem The mapping.
6539 * @param fAccess The kind of access.
6540 */
6541void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6542{
6543 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6544 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6545
6546 /* If it's bounce buffered, we may need to write back the buffer. */
6547 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6548 {
6549 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6550 {
6551 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6552 if (rcStrict == VINF_SUCCESS)
6553 return;
6554 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6555 }
6556 }
6557 /* Otherwise unlock it. */
6558 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6559 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6560
6561 /* Free the entry. */
6562 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6563 Assert(pVCpu->iem.s.cActiveMappings != 0);
6564 pVCpu->iem.s.cActiveMappings--;
6565}
6566
6567#endif /* IEM_WITH_SETJMP */
6568
6569#ifndef IN_RING3
6570/**
6571 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6572 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
6573 *
6574 * Allows the instruction to be completed and retired, while the IEM user will
6575 * return to ring-3 immediately afterwards and do the postponed writes there.
6576 *
6577 * @returns VBox status code (no strict statuses). Caller must check
6578 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6580 * @param pvMem The mapping.
6581 * @param fAccess The kind of access.
6582 */
6583VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6584{
6585 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6586 AssertReturn(iMemMap >= 0, iMemMap);
6587
6588 /* If it's bounce buffered, we may need to write back the buffer. */
6589 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6590 {
6591 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6592 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6593 }
6594 /* Otherwise unlock it. */
6595 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6596 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6597
6598 /* Free the entry. */
6599 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6600 Assert(pVCpu->iem.s.cActiveMappings != 0);
6601 pVCpu->iem.s.cActiveMappings--;
6602 return VINF_SUCCESS;
6603}
6604#endif
6605
6606
6607/**
6608 * Rolls back mappings, releasing page locks and such.
6609 *
6610 * The caller shall only call this after checking cActiveMappings.
6611 *
6612 * @returns Strict VBox status code to pass up.
6613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6614 */
6615void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6616{
6617 Assert(pVCpu->iem.s.cActiveMappings > 0);
6618
6619 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6620 while (iMemMap-- > 0)
6621 {
6622 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6623 if (fAccess != IEM_ACCESS_INVALID)
6624 {
6625 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6626 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6627 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6628 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6629 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6630 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6631 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6632 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6633 pVCpu->iem.s.cActiveMappings--;
6634 }
6635 }
6636}
6637
6638
6639/**
6640 * Fetches a data byte.
6641 *
6642 * @returns Strict VBox status code.
6643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6644 * @param pu8Dst Where to return the byte.
6645 * @param iSegReg The index of the segment register to use for
6646 * this access. The base and limits are checked.
6647 * @param GCPtrMem The address of the guest memory.
6648 */
6649VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6650{
6651 /* The lazy approach for now... */
6652 uint8_t const *pu8Src;
6653 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6654 if (rc == VINF_SUCCESS)
6655 {
6656 *pu8Dst = *pu8Src;
6657 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6658 }
6659 return rc;
6660}
6661
6662
6663#ifdef IEM_WITH_SETJMP
6664/**
6665 * Fetches a data byte, longjmp on error.
6666 *
6667 * @returns The byte.
6668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6669 * @param iSegReg The index of the segment register to use for
6670 * this access. The base and limits are checked.
6671 * @param GCPtrMem The address of the guest memory.
6672 */
6673uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6674{
6675 /* The lazy approach for now... */
6676 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6677 uint8_t const bRet = *pu8Src;
6678 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6679 return bRet;
6680}
6681#endif /* IEM_WITH_SETJMP */
6682
6683
6684/**
6685 * Fetches a data word.
6686 *
6687 * @returns Strict VBox status code.
6688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6689 * @param pu16Dst Where to return the word.
6690 * @param iSegReg The index of the segment register to use for
6691 * this access. The base and limits are checked.
6692 * @param GCPtrMem The address of the guest memory.
6693 */
6694VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6695{
6696 /* The lazy approach for now... */
6697 uint16_t const *pu16Src;
6698 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6699 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6700 if (rc == VINF_SUCCESS)
6701 {
6702 *pu16Dst = *pu16Src;
6703 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6704 }
6705 return rc;
6706}
6707
6708
6709#ifdef IEM_WITH_SETJMP
6710/**
6711 * Fetches a data word, longjmp on error.
6712 *
6713 * @returns The word.
6714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6715 * @param iSegReg The index of the segment register to use for
6716 * this access. The base and limits are checked.
6717 * @param GCPtrMem The address of the guest memory.
6718 */
6719uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6720{
6721 /* The lazy approach for now... */
6722 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6723 sizeof(*pu16Src) - 1);
6724 uint16_t const u16Ret = *pu16Src;
6725 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6726 return u16Ret;
6727}
6728#endif
6729
6730
6731/**
6732 * Fetches a data dword.
6733 *
6734 * @returns Strict VBox status code.
6735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6736 * @param pu32Dst Where to return the dword.
6737 * @param iSegReg The index of the segment register to use for
6738 * this access. The base and limits are checked.
6739 * @param GCPtrMem The address of the guest memory.
6740 */
6741VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6742{
6743 /* The lazy approach for now... */
6744 uint32_t const *pu32Src;
6745 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6746 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6747 if (rc == VINF_SUCCESS)
6748 {
6749 *pu32Dst = *pu32Src;
6750 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6751 }
6752 return rc;
6753}
6754
6755
6756/**
6757 * Fetches a data dword and zero extends it to a qword.
6758 *
6759 * @returns Strict VBox status code.
6760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6761 * @param pu64Dst Where to return the qword.
6762 * @param iSegReg The index of the segment register to use for
6763 * this access. The base and limits are checked.
6764 * @param GCPtrMem The address of the guest memory.
6765 */
6766VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6767{
6768 /* The lazy approach for now... */
6769 uint32_t const *pu32Src;
6770 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6771 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6772 if (rc == VINF_SUCCESS)
6773 {
6774 *pu64Dst = *pu32Src;
6775 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6776 }
6777 return rc;
6778}
6779
6780
6781#ifdef IEM_WITH_SETJMP
6782
6783/**
6784 * Fetches a data dword, longjmp on error, fallback/safe version.
6785 *
6786 * @returns The dword.
6787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6788 * @param iSegReg The index of the segment register to use for
6789 * this access. The base and limits are checked.
6790 * @param GCPtrMem The address of the guest memory.
6791 */
6792uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6793{
6794 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6795 sizeof(*pu32Src) - 1);
6796 uint32_t const u32Ret = *pu32Src;
6797 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6798 return u32Ret;
6799}
6800
6801
6802/**
6803 * Fetches a data dword, longjmp on error.
6804 *
6805 * @returns The dword.
6806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6807 * @param iSegReg The index of the segment register to use for
6808 * this access. The base and limits are checked.
6809 * @param GCPtrMem The address of the guest memory.
6810 */
6811uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6812{
6813# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6814 /*
6815 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6816 */
6817 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6818 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6819 {
6820 /*
6821 * TLB lookup.
6822 */
6823 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6824 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6825 if (pTlbe->uTag == uTag)
6826 {
6827 /*
6828 * Check TLB page table level access flags.
6829 */
6830 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6831 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6832 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6833 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6834 {
6835 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6836
6837 /*
6838 * Alignment check:
6839 */
6840 /** @todo check priority \#AC vs \#PF */
6841 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6842 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6843 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6844 || pVCpu->iem.s.uCpl != 3)
6845 {
6846 /*
6847 * Fetch and return the dword
6848 */
6849 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6850 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6851 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6852 }
6853 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6854 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6855 }
6856 }
6857 }
6858
6859 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
6860 outdated page pointer, or other troubles. */
6861 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6862 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6863
6864# else
6865 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
6866 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6867 uint32_t const u32Ret = *pu32Src;
6868 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6869 return u32Ret;
6870# endif
6871}
6872#endif
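
/*
 * Illustrative sketch (not part of the build): the alignment-check condition
 * used by the TLB fast path above.  #AC is only considered when the access is
 * misaligned AND CR0.AM is set AND EFLAGS.AC is set AND we are at CPL 3;
 * otherwise the fast path just reads the data.  Plain bools stand in for the
 * CR0/EFLAGS bit tests.
 */
#if 0 /* example only */
static bool iemExampleRaisesAc(uint64_t GCPtrEff, size_t cbAccess, bool fCr0Am, bool fEflAc, unsigned uCpl)
{
    bool const fMisaligned = (GCPtrEff & (cbAccess - 1)) != 0; /* natural alignment check */
    return fMisaligned && fCr0Am && fEflAc && uCpl == 3;
}
#endif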
6873
6874
6875#ifdef SOME_UNUSED_FUNCTION
6876/**
6877 * Fetches a data dword and sign extends it to a qword.
6878 *
6879 * @returns Strict VBox status code.
6880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6881 * @param pu64Dst Where to return the sign extended value.
6882 * @param iSegReg The index of the segment register to use for
6883 * this access. The base and limits are checked.
6884 * @param GCPtrMem The address of the guest memory.
6885 */
6886VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6887{
6888 /* The lazy approach for now... */
6889 int32_t const *pi32Src;
6890 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
6891 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
6892 if (rc == VINF_SUCCESS)
6893 {
6894 *pu64Dst = *pi32Src;
6895 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6896 }
6897#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6898 else
6899 *pu64Dst = 0;
6900#endif
6901 return rc;
6902}
6903#endif
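
/*
 * Illustrative sketch (not part of the build): the only difference between
 * the ZX and SX dword-to-qword fetchers is the type of the source operand -
 * assigning a uint32_t zero extends, while assigning an int32_t sign extends
 * when converted to uint64_t.
 */
#if 0 /* example only */
static void iemExampleExtendDword(uint32_t u32)
{
    uint64_t const uZx = u32;           /* zero extension:  0xffffffff -> 0x00000000ffffffff */
    uint64_t const uSx = (int32_t)u32;  /* sign extension:  0xffffffff -> 0xffffffffffffffff */
    (void)uZx; (void)uSx;
}
#endif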
6904
6905
6906/**
6907 * Fetches a data qword.
6908 *
6909 * @returns Strict VBox status code.
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 * @param pu64Dst Where to return the qword.
6912 * @param iSegReg The index of the segment register to use for
6913 * this access. The base and limits are checked.
6914 * @param GCPtrMem The address of the guest memory.
6915 */
6916VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6917{
6918 /* The lazy approach for now... */
6919 uint64_t const *pu64Src;
6920 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6921 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6922 if (rc == VINF_SUCCESS)
6923 {
6924 *pu64Dst = *pu64Src;
6925 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6926 }
6927 return rc;
6928}
6929
6930
6931#ifdef IEM_WITH_SETJMP
6932/**
6933 * Fetches a data qword, longjmp on error.
6934 *
6935 * @returns The qword.
6936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6937 * @param iSegReg The index of the segment register to use for
6938 * this access. The base and limits are checked.
6939 * @param GCPtrMem The address of the guest memory.
6940 */
6941uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6942{
6943 /* The lazy approach for now... */
6944 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
6945 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6946 uint64_t const u64Ret = *pu64Src;
6947 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6948 return u64Ret;
6949}
6950#endif
6951
6952
6953/**
6954 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6955 *
6956 * @returns Strict VBox status code.
6957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6958 * @param pu64Dst Where to return the qword.
6959 * @param iSegReg The index of the segment register to use for
6960 * this access. The base and limits are checked.
6961 * @param GCPtrMem The address of the guest memory.
6962 */
6963VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6964{
6965 /* The lazy approach for now... */
6966 uint64_t const *pu64Src;
6967 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6968 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6969 if (rc == VINF_SUCCESS)
6970 {
6971 *pu64Dst = *pu64Src;
6972 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6973 }
6974 return rc;
6975}
6976
6977
6978#ifdef IEM_WITH_SETJMP
6979/**
6980 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6981 *
6982 * @returns The qword.
6983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6984 * @param iSegReg The index of the segment register to use for
6985 * this access. The base and limits are checked.
6986 * @param GCPtrMem The address of the guest memory.
6987 */
6988uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6989{
6990 /* The lazy approach for now... */
6991 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6992 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6993 uint64_t const u64Ret = *pu64Src;
6994 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6995 return u64Ret;
6996}
6997#endif
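
/*
 * Illustrative sketch (not part of the build): how an uAlignCtl word like the
 * "15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE" value above is put
 * together - bits 15:0 carry the alignment mask (15 means 16-byte alignment),
 * the upper bits carry the IEM_MEMMAP_F_ALIGN_XXX policy flags.
 */
#if 0 /* example only */
static void iemExampleSplitAlignCtl(uint32_t uAlignCtl)
{
    uint32_t const fAlignMask = uAlignCtl & UINT16_MAX;            /* e.g. 15 -> address must be 16-byte aligned */
    uint32_t const fFlags     = uAlignCtl & ~(uint32_t)UINT16_MAX; /* IEM_MEMMAP_F_ALIGN_GP and friends */
    (void)fAlignMask; (void)fFlags;
}
#endif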
6998
6999
7000/**
7001 * Fetches a data tword.
7002 *
7003 * @returns Strict VBox status code.
7004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7005 * @param pr80Dst Where to return the tword.
7006 * @param iSegReg The index of the segment register to use for
7007 * this access. The base and limits are checked.
7008 * @param GCPtrMem The address of the guest memory.
7009 */
7010VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7011{
7012 /* The lazy approach for now... */
7013 PCRTFLOAT80U pr80Src;
7014 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7015 if (rc == VINF_SUCCESS)
7016 {
7017 *pr80Dst = *pr80Src;
7018 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7019 }
7020 return rc;
7021}
7022
7023
7024#ifdef IEM_WITH_SETJMP
7025/**
7026 * Fetches a data tword, longjmp on error.
7027 *
7028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7029 * @param pr80Dst Where to return the tword.
7030 * @param iSegReg The index of the segment register to use for
7031 * this access. The base and limits are checked.
7032 * @param GCPtrMem The address of the guest memory.
7033 */
7034void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7035{
7036 /* The lazy approach for now... */
7037 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7038 *pr80Dst = *pr80Src;
7039 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7040}
7041#endif
7042
7043
7044/**
7045 * Fetches a data decimal tword.
7046 *
7047 * @returns Strict VBox status code.
7048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7049 * @param pd80Dst Where to return the tword.
7050 * @param iSegReg The index of the segment register to use for
7051 * this access. The base and limits are checked.
7052 * @param GCPtrMem The address of the guest memory.
7053 */
7054VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7055{
7056 /* The lazy approach for now... */
7057 PCRTPBCD80U pd80Src;
7058 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7059 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7060 if (rc == VINF_SUCCESS)
7061 {
7062 *pd80Dst = *pd80Src;
7063 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7064 }
7065 return rc;
7066}
7067
7068
7069#ifdef IEM_WITH_SETJMP
7070/**
7071 * Fetches a data decimal tword, longjmp on error.
7072 *
7073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7074 * @param pd80Dst Where to return the tword.
7075 * @param iSegReg The index of the segment register to use for
7076 * this access. The base and limits are checked.
7077 * @param GCPtrMem The address of the guest memory.
7078 */
7079void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7080{
7081 /* The lazy approach for now... */
7082 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7083 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7084 *pd80Dst = *pd80Src;
7085 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7086}
7087#endif
7088
7089
7090/**
7091 * Fetches a data dqword (double qword), generally SSE related.
7092 *
7093 * @returns Strict VBox status code.
7094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7095 * @param pu128Dst Where to return the dqword.
7096 * @param iSegReg The index of the segment register to use for
7097 * this access. The base and limits are checked.
7098 * @param GCPtrMem The address of the guest memory.
7099 */
7100VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7101{
7102 /* The lazy approach for now... */
7103 PCRTUINT128U pu128Src;
7104 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7105 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7106 if (rc == VINF_SUCCESS)
7107 {
7108 pu128Dst->au64[0] = pu128Src->au64[0];
7109 pu128Dst->au64[1] = pu128Src->au64[1];
7110 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7111 }
7112 return rc;
7113}
7114
7115
7116#ifdef IEM_WITH_SETJMP
7117/**
7118 * Fetches a data dqword (double qword), generally SSE related.
7119 *
7120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7121 * @param pu128Dst Where to return the dqword.
7122 * @param iSegReg The index of the segment register to use for
7123 * this access. The base and limits are checked.
7124 * @param GCPtrMem The address of the guest memory.
7125 */
7126void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7127{
7128 /* The lazy approach for now... */
7129 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7130 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7131 pu128Dst->au64[0] = pu128Src->au64[0];
7132 pu128Dst->au64[1] = pu128Src->au64[1];
7133 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7134}
7135#endif
7136
7137
7138/**
7139 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7140 * related.
7141 *
7142 * Raises \#GP(0) if not aligned.
7143 *
7144 * @returns Strict VBox status code.
7145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7146 * @param pu128Dst Where to return the dqword.
7147 * @param iSegReg The index of the segment register to use for
7148 * this access. The base and limits are checked.
7149 * @param GCPtrMem The address of the guest memory.
7150 */
7151VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7152{
7153 /* The lazy approach for now... */
7154 PCRTUINT128U pu128Src;
7155 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7156 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7157 if (rc == VINF_SUCCESS)
7158 {
7159 pu128Dst->au64[0] = pu128Src->au64[0];
7160 pu128Dst->au64[1] = pu128Src->au64[1];
7161 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7162 }
7163 return rc;
7164}
7165
7166
7167#ifdef IEM_WITH_SETJMP
7168/**
7169 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7170 * related, longjmp on error.
7171 *
7172 * Raises \#GP(0) if not aligned.
7173 *
7174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7175 * @param pu128Dst Where to return the dqword.
7176 * @param iSegReg The index of the segment register to use for
7177 * this access. The base and limits are checked.
7178 * @param GCPtrMem The address of the guest memory.
7179 */
7180void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7181{
7182 /* The lazy approach for now... */
7183 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7184 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7185 pu128Dst->au64[0] = pu128Src->au64[0];
7186 pu128Dst->au64[1] = pu128Src->au64[1];
7187 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7188}
7189#endif
7190
7191
7192/**
7193 * Fetches a data oword (octo word), generally AVX related.
7194 *
7195 * @returns Strict VBox status code.
7196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7197 * @param pu256Dst Where to return the oword.
7198 * @param iSegReg The index of the segment register to use for
7199 * this access. The base and limits are checked.
7200 * @param GCPtrMem The address of the guest memory.
7201 */
7202VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7203{
7204 /* The lazy approach for now... */
7205 PCRTUINT256U pu256Src;
7206 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7207 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7208 if (rc == VINF_SUCCESS)
7209 {
7210 pu256Dst->au64[0] = pu256Src->au64[0];
7211 pu256Dst->au64[1] = pu256Src->au64[1];
7212 pu256Dst->au64[2] = pu256Src->au64[2];
7213 pu256Dst->au64[3] = pu256Src->au64[3];
7214 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7215 }
7216 return rc;
7217}
7218
7219
7220#ifdef IEM_WITH_SETJMP
7221/**
7222 * Fetches a data oword (octo word), generally AVX related.
7223 *
7224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7225 * @param pu256Dst Where to return the oword.
7226 * @param iSegReg The index of the segment register to use for
7227 * this access. The base and limits are checked.
7228 * @param GCPtrMem The address of the guest memory.
7229 */
7230void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7231{
7232 /* The lazy approach for now... */
7233 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7234 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7235 pu256Dst->au64[0] = pu256Src->au64[0];
7236 pu256Dst->au64[1] = pu256Src->au64[1];
7237 pu256Dst->au64[2] = pu256Src->au64[2];
7238 pu256Dst->au64[3] = pu256Src->au64[3];
7239 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7240}
7241#endif
7242
7243
7244/**
7245 * Fetches a data oword (octo word) at an aligned address, generally AVX
7246 * related.
7247 *
7248 * Raises \#GP(0) if not aligned.
7249 *
7250 * @returns Strict VBox status code.
7251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7252 * @param pu256Dst Where to return the oword.
7253 * @param iSegReg The index of the segment register to use for
7254 * this access. The base and limits are checked.
7255 * @param GCPtrMem The address of the guest memory.
7256 */
7257VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7258{
7259 /* The lazy approach for now... */
7260 PCRTUINT256U pu256Src;
7261 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7262 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7263 if (rc == VINF_SUCCESS)
7264 {
7265 pu256Dst->au64[0] = pu256Src->au64[0];
7266 pu256Dst->au64[1] = pu256Src->au64[1];
7267 pu256Dst->au64[2] = pu256Src->au64[2];
7268 pu256Dst->au64[3] = pu256Src->au64[3];
7269 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7270 }
7271 return rc;
7272}
7273
7274
7275#ifdef IEM_WITH_SETJMP
7276/**
7277 * Fetches a data oword (octo word) at an aligned address, generally AVX
7278 * related, longjmp on error.
7279 *
7280 * Raises \#GP(0) if not aligned.
7281 *
7282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7283 * @param pu256Dst Where to return the oword.
7284 * @param iSegReg The index of the segment register to use for
7285 * this access. The base and limits are checked.
7286 * @param GCPtrMem The address of the guest memory.
7287 */
7288void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7289{
7290 /* The lazy approach for now... */
7291 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7292 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7293 pu256Dst->au64[0] = pu256Src->au64[0];
7294 pu256Dst->au64[1] = pu256Src->au64[1];
7295 pu256Dst->au64[2] = pu256Src->au64[2];
7296 pu256Dst->au64[3] = pu256Src->au64[3];
7297 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7298}
7299#endif
7300
7301
7302
7303/**
7304 * Fetches a descriptor register (lgdt, lidt).
7305 *
7306 * @returns Strict VBox status code.
7307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7308 * @param pcbLimit Where to return the limit.
7309 * @param pGCPtrBase Where to return the base.
7310 * @param iSegReg The index of the segment register to use for
7311 * this access. The base and limits are checked.
7312 * @param GCPtrMem The address of the guest memory.
7313 * @param enmOpSize The effective operand size.
7314 */
7315VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7316 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7317{
7318 /*
7319 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7320 * little special:
7321 * - The two reads are done separately.
7322 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit mode.
7323 * - We suspect the 386 to actually commit the limit before the base in
7324 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7325 * don't try emulate this eccentric behavior, because it's not well
7326 * enough understood and rather hard to trigger.
7327 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7328 */
7329 VBOXSTRICTRC rcStrict;
7330 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7331 {
7332 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7333 if (rcStrict == VINF_SUCCESS)
7334 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7335 }
7336 else
7337 {
7338 uint32_t uTmp = 0; /* (Silences a Visual C++ 'maybe used uninitialized' warning.) */
7339 if (enmOpSize == IEMMODE_32BIT)
7340 {
7341 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7342 {
7343 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7344 if (rcStrict == VINF_SUCCESS)
7345 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7346 }
7347 else
7348 {
7349 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7350 if (rcStrict == VINF_SUCCESS)
7351 {
7352 *pcbLimit = (uint16_t)uTmp;
7353 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7354 }
7355 }
7356 if (rcStrict == VINF_SUCCESS)
7357 *pGCPtrBase = uTmp;
7358 }
7359 else
7360 {
7361 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7362 if (rcStrict == VINF_SUCCESS)
7363 {
7364 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7365 if (rcStrict == VINF_SUCCESS)
7366 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7367 }
7368 }
7369 }
7370 return rcStrict;
7371}
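
/*
 * Illustrative sketch (not part of the build): decoding the 6-byte LGDT/LIDT
 * memory operand outside long mode.  The first word is the limit, the next
 * dword the base; with a 16-bit operand size only the low 24 bits of the base
 * are kept, exactly as the UINT32_C(0x00ffffff) mask above does.  The helper
 * is made up for the example and assembles the bytes little-endian by hand.
 */
#if 0 /* example only */
static void iemExampleDecodeXdtr(uint8_t const *pbOperand, bool f32BitOpSize,
                                 uint16_t *pcbLimit, uint32_t *pu32Base)
{
    *pcbLimit = (uint16_t)(pbOperand[0] | ((uint16_t)pbOperand[1] << 8));
    uint32_t const uBase = pbOperand[2]
                         | ((uint32_t)pbOperand[3] << 8)
                         | ((uint32_t)pbOperand[4] << 16)
                         | ((uint32_t)pbOperand[5] << 24);
    *pu32Base = f32BitOpSize ? uBase : uBase & UINT32_C(0x00ffffff); /* 24-bit base with a 16-bit operand size */
}
#endif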
7372
7373
7374
7375/**
7376 * Stores a data byte.
7377 *
7378 * @returns Strict VBox status code.
7379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7380 * @param iSegReg The index of the segment register to use for
7381 * this access. The base and limits are checked.
7382 * @param GCPtrMem The address of the guest memory.
7383 * @param u8Value The value to store.
7384 */
7385VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7386{
7387 /* The lazy approach for now... */
7388 uint8_t *pu8Dst;
7389 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7390 if (rc == VINF_SUCCESS)
7391 {
7392 *pu8Dst = u8Value;
7393 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7394 }
7395 return rc;
7396}
7397
7398
7399#ifdef IEM_WITH_SETJMP
7400/**
7401 * Stores a data byte, longjmp on error.
7402 *
7403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7404 * @param iSegReg The index of the segment register to use for
7405 * this access. The base and limits are checked.
7406 * @param GCPtrMem The address of the guest memory.
7407 * @param u8Value The value to store.
7408 */
7409void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7410{
7411 /* The lazy approach for now... */
7412 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7413 *pu8Dst = u8Value;
7414 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7415}
7416#endif
7417
7418
7419/**
7420 * Stores a data word.
7421 *
7422 * @returns Strict VBox status code.
7423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7424 * @param iSegReg The index of the segment register to use for
7425 * this access. The base and limits are checked.
7426 * @param GCPtrMem The address of the guest memory.
7427 * @param u16Value The value to store.
7428 */
7429VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7430{
7431 /* The lazy approach for now... */
7432 uint16_t *pu16Dst;
7433 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7434 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7435 if (rc == VINF_SUCCESS)
7436 {
7437 *pu16Dst = u16Value;
7438 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7439 }
7440 return rc;
7441}
7442
7443
7444#ifdef IEM_WITH_SETJMP
7445/**
7446 * Stores a data word, longjmp on error.
7447 *
7448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7449 * @param iSegReg The index of the segment register to use for
7450 * this access. The base and limits are checked.
7451 * @param GCPtrMem The address of the guest memory.
7452 * @param u16Value The value to store.
7453 */
7454void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7455{
7456 /* The lazy approach for now... */
7457 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7458 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7459 *pu16Dst = u16Value;
7460 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7461}
7462#endif
7463
7464
7465/**
7466 * Stores a data dword.
7467 *
7468 * @returns Strict VBox status code.
7469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7470 * @param iSegReg The index of the segment register to use for
7471 * this access. The base and limits are checked.
7472 * @param GCPtrMem The address of the guest memory.
7473 * @param u32Value The value to store.
7474 */
7475VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7476{
7477 /* The lazy approach for now... */
7478 uint32_t *pu32Dst;
7479 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7480 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7481 if (rc == VINF_SUCCESS)
7482 {
7483 *pu32Dst = u32Value;
7484 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7485 }
7486 return rc;
7487}
7488
7489
7490#ifdef IEM_WITH_SETJMP
7491/**
7492 * Stores a data dword, longjmp on error.
7493 *
7495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7496 * @param iSegReg The index of the segment register to use for
7497 * this access. The base and limits are checked.
7498 * @param GCPtrMem The address of the guest memory.
7499 * @param u32Value The value to store.
7500 */
7501void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7502{
7503 /* The lazy approach for now... */
7504 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7505 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7506 *pu32Dst = u32Value;
7507 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7508}
7509#endif
7510
7511
7512/**
7513 * Stores a data qword.
7514 *
7515 * @returns Strict VBox status code.
7516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7517 * @param iSegReg The index of the segment register to use for
7518 * this access. The base and limits are checked.
7519 * @param GCPtrMem The address of the guest memory.
7520 * @param u64Value The value to store.
7521 */
7522VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7523{
7524 /* The lazy approach for now... */
7525 uint64_t *pu64Dst;
7526 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7527 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7528 if (rc == VINF_SUCCESS)
7529 {
7530 *pu64Dst = u64Value;
7531 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7532 }
7533 return rc;
7534}
7535
7536
7537#ifdef IEM_WITH_SETJMP
7538/**
7539 * Stores a data qword, longjmp on error.
7540 *
7541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7542 * @param iSegReg The index of the segment register to use for
7543 * this access. The base and limits are checked.
7544 * @param GCPtrMem The address of the guest memory.
7545 * @param u64Value The value to store.
7546 */
7547void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7548{
7549 /* The lazy approach for now... */
7550 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7551 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7552 *pu64Dst = u64Value;
7553 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7554}
7555#endif
7556
7557
7558/**
7559 * Stores a data dqword.
7560 *
7561 * @returns Strict VBox status code.
7562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7563 * @param iSegReg The index of the segment register to use for
7564 * this access. The base and limits are checked.
7565 * @param GCPtrMem The address of the guest memory.
7566 * @param u128Value The value to store.
7567 */
7568VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7569{
7570 /* The lazy approach for now... */
7571 PRTUINT128U pu128Dst;
7572 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7573 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7574 if (rc == VINF_SUCCESS)
7575 {
7576 pu128Dst->au64[0] = u128Value.au64[0];
7577 pu128Dst->au64[1] = u128Value.au64[1];
7578 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7579 }
7580 return rc;
7581}
7582
7583
7584#ifdef IEM_WITH_SETJMP
7585/**
7586 * Stores a data dqword, longjmp on error.
7587 *
7588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7589 * @param iSegReg The index of the segment register to use for
7590 * this access. The base and limits are checked.
7591 * @param GCPtrMem The address of the guest memory.
7592 * @param u128Value The value to store.
7593 */
7594void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7595{
7596 /* The lazy approach for now... */
7597 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7598 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7599 pu128Dst->au64[0] = u128Value.au64[0];
7600 pu128Dst->au64[1] = u128Value.au64[1];
7601 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7602}
7603#endif
7604
7605
7606/**
7607 * Stores a data dqword, SSE aligned.
7608 *
7609 * @returns Strict VBox status code.
7610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7611 * @param iSegReg The index of the segment register to use for
7612 * this access. The base and limits are checked.
7613 * @param GCPtrMem The address of the guest memory.
7614 * @param u128Value The value to store.
7615 */
7616VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7617{
7618 /* The lazy approach for now... */
7619 PRTUINT128U pu128Dst;
7620 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7621 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7622 if (rc == VINF_SUCCESS)
7623 {
7624 pu128Dst->au64[0] = u128Value.au64[0];
7625 pu128Dst->au64[1] = u128Value.au64[1];
7626 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7627 }
7628 return rc;
7629}
7630
7631
7632#ifdef IEM_WITH_SETJMP
7633/**
7634 * Stores a data dqword, SSE aligned, longjmp on error.
7635 *
7637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7638 * @param iSegReg The index of the segment register to use for
7639 * this access. The base and limits are checked.
7640 * @param GCPtrMem The address of the guest memory.
7641 * @param u128Value The value to store.
7642 */
7643void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7644{
7645 /* The lazy approach for now... */
7646 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7647 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7648 pu128Dst->au64[0] = u128Value.au64[0];
7649 pu128Dst->au64[1] = u128Value.au64[1];
7650 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7651}
7652#endif
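/*
 * Editor's note (illustration, not from the original source): the last
 * iemMemMap argument in the aligned stores above packs the natural alignment
 * mask together with behaviour flags.  A minimal sketch of how such a request
 * is composed; the exact flag semantics are defined where the flags are
 * declared.
 */
#if 0 /* example only */
{
    /* 16-byte SSE store: mask 15, request #GP(0) on misalignment plus the
       SSE-specific alignment handling. */
    uint32_t const fAlignReq = (sizeof(RTUINT128U) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE;
    NOREF(fAlignReq);
}
#endif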
7653
7654
7655/**
7656 * Stores a data qqword (256 bits).
7657 *
7658 * @returns Strict VBox status code.
7659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7660 * @param iSegReg The index of the segment register to use for
7661 * this access. The base and limits are checked.
7662 * @param GCPtrMem The address of the guest memory.
7663 * @param pu256Value Pointer to the value to store.
7664 */
7665VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7666{
7667 /* The lazy approach for now... */
7668 PRTUINT256U pu256Dst;
7669 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7670 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7671 if (rc == VINF_SUCCESS)
7672 {
7673 pu256Dst->au64[0] = pu256Value->au64[0];
7674 pu256Dst->au64[1] = pu256Value->au64[1];
7675 pu256Dst->au64[2] = pu256Value->au64[2];
7676 pu256Dst->au64[3] = pu256Value->au64[3];
7677 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7678 }
7679 return rc;
7680}
7681
7682
7683#ifdef IEM_WITH_SETJMP
7684/**
7685 * Stores a data qqword (256 bits), longjmp on error.
7686 *
7687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7688 * @param iSegReg The index of the segment register to use for
7689 * this access. The base and limits are checked.
7690 * @param GCPtrMem The address of the guest memory.
7691 * @param pu256Value Pointer to the value to store.
7692 */
7693void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7694{
7695 /* The lazy approach for now... */
7696 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7697 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7698 pu256Dst->au64[0] = pu256Value->au64[0];
7699 pu256Dst->au64[1] = pu256Value->au64[1];
7700 pu256Dst->au64[2] = pu256Value->au64[2];
7701 pu256Dst->au64[3] = pu256Value->au64[3];
7702 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7703}
7704#endif
7705
7706
7707/**
7708 * Stores a data qqword (256 bits), AVX aligned, raising \#GP(0) on misalignment.
7709 *
7710 * @returns Strict VBox status code.
7711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7712 * @param iSegReg The index of the segment register to use for
7713 * this access. The base and limits are checked.
7714 * @param GCPtrMem The address of the guest memory.
7715 * @param pu256Value Pointer to the value to store.
7716 */
7717VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7718{
7719 /* The lazy approach for now... */
7720 PRTUINT256U pu256Dst;
7721 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7722 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7723 if (rc == VINF_SUCCESS)
7724 {
7725 pu256Dst->au64[0] = pu256Value->au64[0];
7726 pu256Dst->au64[1] = pu256Value->au64[1];
7727 pu256Dst->au64[2] = pu256Value->au64[2];
7728 pu256Dst->au64[3] = pu256Value->au64[3];
7729 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7730 }
7731 return rc;
7732}
7733
7734
7735#ifdef IEM_WITH_SETJMP
7736/**
7737 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7738 *
7740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7741 * @param iSegReg The index of the segment register to use for
7742 * this access. The base and limits are checked.
7743 * @param GCPtrMem The address of the guest memory.
7744 * @param pu256Value Pointer to the value to store.
7745 */
7746void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7747{
7748 /* The lazy approach for now... */
7749 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7750 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7751 pu256Dst->au64[0] = pu256Value->au64[0];
7752 pu256Dst->au64[1] = pu256Value->au64[1];
7753 pu256Dst->au64[2] = pu256Value->au64[2];
7754 pu256Dst->au64[3] = pu256Value->au64[3];
7755 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7756}
7757#endif
7758
7759
7760/**
7761 * Stores a descriptor register (sgdt, sidt).
7762 *
7763 * @returns Strict VBox status code.
7764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7765 * @param cbLimit The limit.
7766 * @param GCPtrBase The base address.
7767 * @param iSegReg The index of the segment register to use for
7768 * this access. The base and limits are checked.
7769 * @param GCPtrMem The address of the guest memory.
7770 */
7771VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7772{
7773 /*
7774     * The SIDT and SGDT instructions actually store the data using two
7775     * independent writes (see bs3CpuBasic2_sidt_sgdt_One).  The instructions
7776     * do not respond to operand size prefixes.
7777 */
7778 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7779 if (rcStrict == VINF_SUCCESS)
7780 {
7781 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7782 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7783 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7784 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7785 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7786 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7787 else
7788 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7789 }
7790 return rcStrict;
7791}
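/*
 * Editor's note (illustration): the pseudo-descriptor written by
 * iemMemStoreDataXdtr above.  For example, SGDT in 32-bit code with
 * GDTR.limit=0x0027 and GDTR.base=0xfee1f000 would produce the six bytes
 *      27 00 00 f0 e1 fe
 * at GCPtrMem: the 16-bit limit at offset 0 and the 32-bit base at offset 2
 * (a full 64-bit base in 64-bit mode, and a 0xff high base byte on the 286).
 */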
7792
7793
7794/**
7795 * Pushes a word onto the stack.
7796 *
7797 * @returns Strict VBox status code.
7798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7799 * @param u16Value The value to push.
7800 */
7801VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7802{
7803    /* Decrement the stack pointer. */
7804 uint64_t uNewRsp;
7805 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7806
7807 /* Write the word the lazy way. */
7808 uint16_t *pu16Dst;
7809 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7810 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7811 if (rc == VINF_SUCCESS)
7812 {
7813 *pu16Dst = u16Value;
7814 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7815 }
7816
7817    /* Commit the new RSP value unless an access handler made trouble. */
7818 if (rc == VINF_SUCCESS)
7819 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7820
7821 return rc;
7822}
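/*
 * Editor's illustration (not part of the original source): how an instruction
 * implementation would typically use the push helper above, e.g. a 16-bit
 * PUSH of AX.
 */
#if 0 /* example only */
{
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, pVCpu->cpum.GstCtx.ax);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict; /* RSP is only committed by the helper on success. */
}
#endif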
7823
7824
7825/**
7826 * Pushes a dword onto the stack.
7827 *
7828 * @returns Strict VBox status code.
7829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7830 * @param u32Value The value to push.
7831 */
7832VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7833{
7834    /* Decrement the stack pointer. */
7835 uint64_t uNewRsp;
7836 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7837
7838 /* Write the dword the lazy way. */
7839 uint32_t *pu32Dst;
7840 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7841 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7842 if (rc == VINF_SUCCESS)
7843 {
7844 *pu32Dst = u32Value;
7845 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7846 }
7847
7848    /* Commit the new RSP value unless an access handler made trouble. */
7849 if (rc == VINF_SUCCESS)
7850 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7851
7852 return rc;
7853}
7854
7855
7856/**
7857 * Pushes a dword segment register value onto the stack.
7858 *
7859 * @returns Strict VBox status code.
7860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7861 * @param u32Value The value to push.
7862 */
7863VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7864{
7865    /* Decrement the stack pointer. */
7866 uint64_t uNewRsp;
7867 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7868
7869    /* The Intel docs talk about zero extending the selector register
7870       value.  My actual Intel CPU here might be zero extending the value,
7871       but it still only writes the lower word... */
7872 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7873     * happens when crossing an electric page boundary: is the high word checked
7874 * for write accessibility or not? Probably it is. What about segment limits?
7875 * It appears this behavior is also shared with trap error codes.
7876 *
7877     * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.  Check
7878     * on ancient hardware to establish when it actually changed. */
7879 uint16_t *pu16Dst;
7880 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
7881 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
7882 if (rc == VINF_SUCCESS)
7883 {
7884 *pu16Dst = (uint16_t)u32Value;
7885 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7886 }
7887
7888    /* Commit the new RSP value unless an access handler made trouble. */
7889 if (rc == VINF_SUCCESS)
7890 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7891
7892 return rc;
7893}
7894
7895
7896/**
7897 * Pushes a qword onto the stack.
7898 *
7899 * @returns Strict VBox status code.
7900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7901 * @param u64Value The value to push.
7902 */
7903VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7904{
7905    /* Decrement the stack pointer. */
7906 uint64_t uNewRsp;
7907 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7908
7909    /* Write the qword the lazy way. */
7910 uint64_t *pu64Dst;
7911 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7912 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7913 if (rc == VINF_SUCCESS)
7914 {
7915 *pu64Dst = u64Value;
7916 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7917 }
7918
7919    /* Commit the new RSP value unless an access handler made trouble. */
7920 if (rc == VINF_SUCCESS)
7921 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7922
7923 return rc;
7924}
7925
7926
7927/**
7928 * Pops a word from the stack.
7929 *
7930 * @returns Strict VBox status code.
7931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7932 * @param pu16Value Where to store the popped value.
7933 */
7934VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7935{
7936 /* Increment the stack pointer. */
7937 uint64_t uNewRsp;
7938 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7939
7940    /* Fetch the word the lazy way. */
7941 uint16_t const *pu16Src;
7942 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
7943 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
7944 if (rc == VINF_SUCCESS)
7945 {
7946 *pu16Value = *pu16Src;
7947 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7948
7949 /* Commit the new RSP value. */
7950 if (rc == VINF_SUCCESS)
7951 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7952 }
7953
7954 return rc;
7955}
7956
7957
7958/**
7959 * Pops a dword from the stack.
7960 *
7961 * @returns Strict VBox status code.
7962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7963 * @param pu32Value Where to store the popped value.
7964 */
7965VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7966{
7967 /* Increment the stack pointer. */
7968 uint64_t uNewRsp;
7969 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7970
7971    /* Fetch the dword the lazy way. */
7972 uint32_t const *pu32Src;
7973 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
7974 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
7975 if (rc == VINF_SUCCESS)
7976 {
7977 *pu32Value = *pu32Src;
7978 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7979
7980 /* Commit the new RSP value. */
7981 if (rc == VINF_SUCCESS)
7982 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7983 }
7984
7985 return rc;
7986}
7987
7988
7989/**
7990 * Pops a qword from the stack.
7991 *
7992 * @returns Strict VBox status code.
7993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7994 * @param pu64Value Where to store the popped value.
7995 */
7996VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7997{
7998 /* Increment the stack pointer. */
7999 uint64_t uNewRsp;
8000 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8001
8002    /* Fetch the qword the lazy way. */
8003 uint64_t const *pu64Src;
8004 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8005 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8006 if (rc == VINF_SUCCESS)
8007 {
8008 *pu64Value = *pu64Src;
8009 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8010
8011 /* Commit the new RSP value. */
8012 if (rc == VINF_SUCCESS)
8013 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8014 }
8015
8016 return rc;
8017}
8018
8019
8020/**
8021 * Pushes a word onto the stack, using a temporary stack pointer.
8022 *
8023 * @returns Strict VBox status code.
8024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8025 * @param u16Value The value to push.
8026 * @param pTmpRsp Pointer to the temporary stack pointer.
8027 */
8028VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8029{
8030    /* Decrement the stack pointer. */
8031 RTUINT64U NewRsp = *pTmpRsp;
8032 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8033
8034 /* Write the word the lazy way. */
8035 uint16_t *pu16Dst;
8036 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8037 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8038 if (rc == VINF_SUCCESS)
8039 {
8040 *pu16Dst = u16Value;
8041 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8042 }
8043
8044    /* Commit the new RSP value unless an access handler made trouble. */
8045 if (rc == VINF_SUCCESS)
8046 *pTmpRsp = NewRsp;
8047
8048 return rc;
8049}
8050
8051
8052/**
8053 * Pushes a dword onto the stack, using a temporary stack pointer.
8054 *
8055 * @returns Strict VBox status code.
8056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8057 * @param u32Value The value to push.
8058 * @param pTmpRsp Pointer to the temporary stack pointer.
8059 */
8060VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8061{
8062    /* Decrement the stack pointer. */
8063 RTUINT64U NewRsp = *pTmpRsp;
8064 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8065
8066    /* Write the dword the lazy way. */
8067 uint32_t *pu32Dst;
8068 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8069 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8070 if (rc == VINF_SUCCESS)
8071 {
8072 *pu32Dst = u32Value;
8073 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8074 }
8075
8076    /* Commit the new RSP value unless an access handler made trouble. */
8077 if (rc == VINF_SUCCESS)
8078 *pTmpRsp = NewRsp;
8079
8080 return rc;
8081}
8082
8083
8084/**
8085 * Pushes a qword onto the stack, using a temporary stack pointer.
8086 *
8087 * @returns Strict VBox status code.
8088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8089 * @param u64Value The value to push.
8090 * @param pTmpRsp Pointer to the temporary stack pointer.
8091 */
8092VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8093{
8094    /* Decrement the stack pointer. */
8095 RTUINT64U NewRsp = *pTmpRsp;
8096 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8097
8098    /* Write the qword the lazy way. */
8099 uint64_t *pu64Dst;
8100 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8101 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8102 if (rc == VINF_SUCCESS)
8103 {
8104 *pu64Dst = u64Value;
8105 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8106 }
8107
8108    /* Commit the new RSP value unless an access handler made trouble. */
8109 if (rc == VINF_SUCCESS)
8110 *pTmpRsp = NewRsp;
8111
8112 return rc;
8113}
8114
8115
8116/**
8117 * Pops a word from the stack, using a temporary stack pointer.
8118 *
8119 * @returns Strict VBox status code.
8120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8121 * @param pu16Value Where to store the popped value.
8122 * @param pTmpRsp Pointer to the temporary stack pointer.
8123 */
8124VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8125{
8126 /* Increment the stack pointer. */
8127 RTUINT64U NewRsp = *pTmpRsp;
8128 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8129
8130    /* Fetch the word the lazy way. */
8131 uint16_t const *pu16Src;
8132 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8133 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8134 if (rc == VINF_SUCCESS)
8135 {
8136 *pu16Value = *pu16Src;
8137 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8138
8139 /* Commit the new RSP value. */
8140 if (rc == VINF_SUCCESS)
8141 *pTmpRsp = NewRsp;
8142 }
8143
8144 return rc;
8145}
8146
8147
8148/**
8149 * Pops a dword from the stack, using a temporary stack pointer.
8150 *
8151 * @returns Strict VBox status code.
8152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8153 * @param pu32Value Where to store the popped value.
8154 * @param pTmpRsp Pointer to the temporary stack pointer.
8155 */
8156VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8157{
8158 /* Increment the stack pointer. */
8159 RTUINT64U NewRsp = *pTmpRsp;
8160 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8161
8162    /* Fetch the dword the lazy way. */
8163 uint32_t const *pu32Src;
8164 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8165 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8166 if (rc == VINF_SUCCESS)
8167 {
8168 *pu32Value = *pu32Src;
8169 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8170
8171 /* Commit the new RSP value. */
8172 if (rc == VINF_SUCCESS)
8173 *pTmpRsp = NewRsp;
8174 }
8175
8176 return rc;
8177}
8178
8179
8180/**
8181 * Pops a qword from the stack, using a temporary stack pointer.
8182 *
8183 * @returns Strict VBox status code.
8184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8185 * @param pu64Value Where to store the popped value.
8186 * @param pTmpRsp Pointer to the temporary stack pointer.
8187 */
8188VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8189{
8190 /* Increment the stack pointer. */
8191 RTUINT64U NewRsp = *pTmpRsp;
8192 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8193
8194    /* Fetch the qword the lazy way. */
8195 uint64_t const *pu64Src;
8196 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8197 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8198 if (rcStrict == VINF_SUCCESS)
8199 {
8200 *pu64Value = *pu64Src;
8201 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8202
8203 /* Commit the new RSP value. */
8204 if (rcStrict == VINF_SUCCESS)
8205 *pTmpRsp = NewRsp;
8206 }
8207
8208 return rcStrict;
8209}
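/*
 * Editor's illustration (not part of the original source): the *Ex push/pop
 * helpers work on a caller supplied RSP copy so that multi-part operations
 * (e.g. far returns) can back out cleanly.  A minimal usage sketch; the local
 * names are assumptions for the example.
 */
#if 0 /* example only */
{
    RTUINT64U TmpRsp;
    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;

    uint16_t uNewIp = 0, uNewCs = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, &uNewIp, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pVCpu, &uNewCs, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = TmpRsp.u; /* Commit only when everything succeeded. */
}
#endif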
8210
8211
8212/**
8213 * Begin a special stack push (used by interrupts, exceptions and such).
8214 *
8215 * This will raise \#SS or \#PF if appropriate.
8216 *
8217 * @returns Strict VBox status code.
8218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8219 * @param cbMem The number of bytes to push onto the stack.
8220 * @param cbAlign The alignment mask (7, 3, 1).
8221 * @param ppvMem Where to return the pointer to the stack memory.
8222 * As with the other memory functions this could be
8223 * direct access or bounce buffered access, so
8224 *                      don't commit any register state until the
8225 *                      commit call succeeds.
8226 * @param puNewRsp Where to return the new RSP value. This must be
8227 * passed unchanged to
8228 * iemMemStackPushCommitSpecial().
8229 */
8230VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8231 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8232{
8233 Assert(cbMem < UINT8_MAX);
8234 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8235 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8236 IEM_ACCESS_STACK_W, cbAlign);
8237}
8238
8239
8240/**
8241 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8242 *
8243 * This will update the rSP.
8244 *
8245 * @returns Strict VBox status code.
8246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8247 * @param pvMem The pointer returned by
8248 * iemMemStackPushBeginSpecial().
8249 * @param uNewRsp The new RSP value returned by
8250 * iemMemStackPushBeginSpecial().
8251 */
8252VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8253{
8254 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8255 if (rcStrict == VINF_SUCCESS)
8256 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8257 return rcStrict;
8258}
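/*
 * Editor's illustration (not part of the original source): the begin/commit
 * pair above is used when several values must be pushed as one unit, e.g. an
 * exception frame.  The size, alignment and frame contents are assumptions
 * for the example.
 */
#if 0 /* example only */
{
    void    *pvStackFrame;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 12 /* cbMem */, 3 /* cbAlign */,
                                                        &pvStackFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... fill in the mapped frame via pvStackFrame ... */
    rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp); /* updates RSP on success */
}
#endif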
8259
8260
8261/**
8262 * Begin a special stack pop (used by iret, retf and such).
8263 *
8264 * This will raise \#SS or \#PF if appropriate.
8265 *
8266 * @returns Strict VBox status code.
8267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8268 * @param cbMem The number of bytes to pop from the stack.
8269 * @param cbAlign The alignment mask (7, 3, 1).
8270 * @param ppvMem Where to return the pointer to the stack memory.
8271 * @param puNewRsp Where to return the new RSP value. This must be
8272 * assigned to CPUMCTX::rsp manually some time
8273 * after iemMemStackPopDoneSpecial() has been
8274 * called.
8275 */
8276VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8277 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8278{
8279 Assert(cbMem < UINT8_MAX);
8280 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8281 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8282}
8283
8284
8285/**
8286 * Continue a special stack pop (used by iret and retf), for the purpose of
8287 * retrieving a new stack pointer.
8288 *
8289 * This will raise \#SS or \#PF if appropriate.
8290 *
8291 * @returns Strict VBox status code.
8292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8293 * @param off Offset from the top of the stack. This is zero
8294 * except in the retf case.
8295 * @param cbMem The number of bytes to pop from the stack.
8296 * @param ppvMem Where to return the pointer to the stack memory.
8297 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8298 * return this because all use of this function is
8299 * to retrieve a new value and anything we return
8300 * here would be discarded.)
8301 */
8302VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8303 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8304{
8305 Assert(cbMem < UINT8_MAX);
8306
8307    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8308 RTGCPTR GCPtrTop;
8309 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8310 GCPtrTop = uCurNewRsp;
8311 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8312 GCPtrTop = (uint32_t)uCurNewRsp;
8313 else
8314 GCPtrTop = (uint16_t)uCurNewRsp;
8315
8316 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8317 0 /* checked in iemMemStackPopBeginSpecial */);
8318}
8319
8320
8321/**
8322 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8323 * iemMemStackPopContinueSpecial).
8324 *
8325 * The caller will manually commit the rSP.
8326 *
8327 * @returns Strict VBox status code.
8328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8329 * @param pvMem The pointer returned by
8330 * iemMemStackPopBeginSpecial() or
8331 * iemMemStackPopContinueSpecial().
8332 */
8333VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8334{
8335 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8336}
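/*
 * Editor's illustration (not part of the original source): the special pop
 * helpers leave RSP untouched; the caller commits it manually once all checks
 * have passed (see the iret/retf implementations).  The frame layout below is
 * an assumption for the example.
 */
#if 0 /* example only */
{
    uint64_t        uNewRsp;
    uint32_t const *pu32Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12 /* cbMem */, 3 /* cbAlign */,
                                                       (void const **)&pu32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t const uNewEip   = pu32Frame[0];
    uint32_t const uNewCs    = pu32Frame[1];
    uint32_t const uNewFlags = pu32Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu32Frame);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp; /* only now is the pop made architecturally visible */
    /* ... validate uNewCs and load uNewEip/uNewFlags afterwards ... */
}
#endif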
8337
8338
8339/**
8340 * Fetches a system table byte.
8341 *
8342 * @returns Strict VBox status code.
8343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8344 * @param pbDst Where to return the byte.
8345 * @param iSegReg The index of the segment register to use for
8346 * this access. The base and limits are checked.
8347 * @param GCPtrMem The address of the guest memory.
8348 */
8349VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8350{
8351 /* The lazy approach for now... */
8352 uint8_t const *pbSrc;
8353 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8354 if (rc == VINF_SUCCESS)
8355 {
8356 *pbDst = *pbSrc;
8357 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8358 }
8359 return rc;
8360}
8361
8362
8363/**
8364 * Fetches a system table word.
8365 *
8366 * @returns Strict VBox status code.
8367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8368 * @param pu16Dst Where to return the word.
8369 * @param iSegReg The index of the segment register to use for
8370 * this access. The base and limits are checked.
8371 * @param GCPtrMem The address of the guest memory.
8372 */
8373VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8374{
8375 /* The lazy approach for now... */
8376 uint16_t const *pu16Src;
8377 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8378 if (rc == VINF_SUCCESS)
8379 {
8380 *pu16Dst = *pu16Src;
8381 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8382 }
8383 return rc;
8384}
8385
8386
8387/**
8388 * Fetches a system table dword.
8389 *
8390 * @returns Strict VBox status code.
8391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8392 * @param pu32Dst Where to return the dword.
8393 * @param iSegReg The index of the segment register to use for
8394 * this access. The base and limits are checked.
8395 * @param GCPtrMem The address of the guest memory.
8396 */
8397VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8398{
8399 /* The lazy approach for now... */
8400 uint32_t const *pu32Src;
8401 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8402 if (rc == VINF_SUCCESS)
8403 {
8404 *pu32Dst = *pu32Src;
8405 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8406 }
8407 return rc;
8408}
8409
8410
8411/**
8412 * Fetches a system table qword.
8413 *
8414 * @returns Strict VBox status code.
8415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8416 * @param pu64Dst Where to return the qword.
8417 * @param iSegReg The index of the segment register to use for
8418 * this access. The base and limits are checked.
8419 * @param GCPtrMem The address of the guest memory.
8420 */
8421VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8422{
8423 /* The lazy approach for now... */
8424 uint64_t const *pu64Src;
8425 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8426 if (rc == VINF_SUCCESS)
8427 {
8428 *pu64Dst = *pu64Src;
8429 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8430 }
8431 return rc;
8432}
8433
8434
8435/**
8436 * Fetches a descriptor table entry with caller specified error code.
8437 *
8438 * @returns Strict VBox status code.
8439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8440 * @param pDesc Where to return the descriptor table entry.
8441 * @param uSel The selector which table entry to fetch.
8442 * @param uXcpt The exception to raise on table lookup error.
8443 * @param uErrorCode The error code associated with the exception.
8444 */
8445static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8446 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8447{
8448 AssertPtr(pDesc);
8449 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8450
8451 /** @todo did the 286 require all 8 bytes to be accessible? */
8452 /*
8453 * Get the selector table base and check bounds.
8454 */
8455 RTGCPTR GCPtrBase;
8456 if (uSel & X86_SEL_LDT)
8457 {
8458 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8459 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8460 {
8461 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8462 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8463 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8464 uErrorCode, 0);
8465 }
8466
8467 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8468 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8469 }
8470 else
8471 {
8472 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8473 {
8474 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8475 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8476 uErrorCode, 0);
8477 }
8478 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8479 }
8480
8481 /*
8482 * Read the legacy descriptor and maybe the long mode extensions if
8483 * required.
8484 */
8485 VBOXSTRICTRC rcStrict;
8486 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8487 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8488 else
8489 {
8490 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8491 if (rcStrict == VINF_SUCCESS)
8492 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8493 if (rcStrict == VINF_SUCCESS)
8494 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8495 if (rcStrict == VINF_SUCCESS)
8496 pDesc->Legacy.au16[3] = 0;
8497 else
8498 return rcStrict;
8499 }
8500
8501 if (rcStrict == VINF_SUCCESS)
8502 {
8503 if ( !IEM_IS_LONG_MODE(pVCpu)
8504 || pDesc->Legacy.Gen.u1DescType)
8505 pDesc->Long.au64[1] = 0;
8506 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8507 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8508 else
8509 {
8510 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8511 /** @todo is this the right exception? */
8512 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8513 }
8514 }
8515 return rcStrict;
8516}
8517
8518
8519/**
8520 * Fetches a descriptor table entry.
8521 *
8522 * @returns Strict VBox status code.
8523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8524 * @param pDesc Where to return the descriptor table entry.
8525 * @param uSel The selector which table entry to fetch.
8526 * @param uXcpt The exception to raise on table lookup error.
8527 */
8528VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8529{
8530 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8531}
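/*
 * Editor's illustration (not part of the original source): a typical caller of
 * iemMemFetchSelDesc, such as a far-transfer check.  uSel, the exception
 * choice and the follow-up checks are assumptions for the example.
 */
#if 0 /* example only */
{
    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!Desc.Legacy.Gen.u1DescType)
    {
        /* system descriptor - handled elsewhere */
    }
}
#endif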
8532
8533
8534/**
8535 * Marks the selector descriptor as accessed (only non-system descriptors).
8536 *
8537 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8538 * will therefore skip the limit checks.
8539 *
8540 * @returns Strict VBox status code.
8541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8542 * @param uSel The selector.
8543 */
8544VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8545{
8546 /*
8547 * Get the selector table base and calculate the entry address.
8548 */
8549 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8550 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8551 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8552 GCPtr += uSel & X86_SEL_MASK;
8553
8554 /*
8555 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8556 * ugly stuff to avoid this. This will make sure it's an atomic access
8557     * as well as more or less remove any question about 8-bit or 32-bit accesses.
8558 */
8559 VBOXSTRICTRC rcStrict;
8560 uint32_t volatile *pu32;
8561 if ((GCPtr & 3) == 0)
8562 {
8563 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8564 GCPtr += 2 + 2;
8565 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8566 if (rcStrict != VINF_SUCCESS)
8567 return rcStrict;
8568        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8569 }
8570 else
8571 {
8572 /* The misaligned GDT/LDT case, map the whole thing. */
8573 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8574 if (rcStrict != VINF_SUCCESS)
8575 return rcStrict;
8576 switch ((uintptr_t)pu32 & 3)
8577 {
8578 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8579 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8580 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8581 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8582 }
8583 }
8584
8585 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8586}
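/*
 * Editor's note (illustration): the accessed flag is bit 40 of the 8-byte
 * descriptor.  In the aligned path above the mapping starts at offset 4, so
 * bit 40 becomes bit 8 of that dword (40 - 32 = 8), which is exactly what the
 * ASMAtomicBitSet(pu32, 8) call sets.  The misaligned path applies the same
 * arithmetic relative to the byte the mapping happened to start at.
 */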
8587
8588/** @} */
8589
8590/** @name Opcode Helpers.
8591 * @{
8592 */
8593
8594/**
8595 * Calculates the effective address of a ModR/M memory operand.
8596 *
8597 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8598 *
8599 * @return Strict VBox status code.
8600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8601 * @param bRm The ModRM byte.
8602 * @param cbImm The size of any immediate following the
8603 * effective address opcode bytes. Important for
8604 * RIP relative addressing.
8605 * @param pGCPtrEff Where to return the effective address.
8606 */
8607VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8608{
8609 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8610# define SET_SS_DEF() \
8611 do \
8612 { \
8613 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8614 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8615 } while (0)
8616
8617 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8618 {
8619/** @todo Check the effective address size crap! */
8620 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8621 {
8622 uint16_t u16EffAddr;
8623
8624 /* Handle the disp16 form with no registers first. */
8625 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8626 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8627 else
8628 {
8629                /* Get the displacement. */
8630 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8631 {
8632 case 0: u16EffAddr = 0; break;
8633 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8634 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8635 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8636 }
8637
8638 /* Add the base and index registers to the disp. */
8639 switch (bRm & X86_MODRM_RM_MASK)
8640 {
8641 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8642 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8643 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8644 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8645 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8646 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8647 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8648 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8649 }
8650 }
8651
8652 *pGCPtrEff = u16EffAddr;
8653 }
8654 else
8655 {
8656 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8657 uint32_t u32EffAddr;
8658
8659 /* Handle the disp32 form with no registers first. */
8660 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8661 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8662 else
8663 {
8664 /* Get the register (or SIB) value. */
8665 switch ((bRm & X86_MODRM_RM_MASK))
8666 {
8667 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8668 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8669 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8670 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8671 case 4: /* SIB */
8672 {
8673 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8674
8675 /* Get the index and scale it. */
8676 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8677 {
8678 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8679 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8680 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8681 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8682 case 4: u32EffAddr = 0; /*none */ break;
8683 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8684 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8685 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8686 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8687 }
8688 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8689
8690 /* add base */
8691 switch (bSib & X86_SIB_BASE_MASK)
8692 {
8693 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8694 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8695 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8696 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8697 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8698 case 5:
8699 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8700 {
8701 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8702 SET_SS_DEF();
8703 }
8704 else
8705 {
8706 uint32_t u32Disp;
8707 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8708 u32EffAddr += u32Disp;
8709 }
8710 break;
8711 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8712 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8713 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8714 }
8715 break;
8716 }
8717 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8718 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8719 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8720 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8721 }
8722
8723 /* Get and add the displacement. */
8724 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8725 {
8726 case 0:
8727 break;
8728 case 1:
8729 {
8730 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8731 u32EffAddr += i8Disp;
8732 break;
8733 }
8734 case 2:
8735 {
8736 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8737 u32EffAddr += u32Disp;
8738 break;
8739 }
8740 default:
8741 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8742 }
8743
8744 }
8745 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8746 *pGCPtrEff = u32EffAddr;
8747 else
8748 {
8749 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8750 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8751 }
8752 }
8753 }
8754 else
8755 {
8756 uint64_t u64EffAddr;
8757
8758 /* Handle the rip+disp32 form with no registers first. */
8759 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8760 {
8761 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8762 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8763 }
8764 else
8765 {
8766 /* Get the register (or SIB) value. */
8767 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8768 {
8769 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8770 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8771 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8772 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8773 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8774 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8775 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8776 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8777 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8778 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8779 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8780 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8781 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8782 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8783 /* SIB */
8784 case 4:
8785 case 12:
8786 {
8787 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8788
8789 /* Get the index and scale it. */
8790 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8791 {
8792 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8793 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8794 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8795 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8796 case 4: u64EffAddr = 0; /*none */ break;
8797 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8798 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8799 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8800 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8801 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8802 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8803 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8804 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8805 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8806 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8807 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8808 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8809 }
8810 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8811
8812 /* add base */
8813 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8814 {
8815 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8816 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8817 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8818 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8819 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8820 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8821 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8822 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8823 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8824 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8825 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8826 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8827 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8828 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8829 /* complicated encodings */
8830 case 5:
8831 case 13:
8832 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8833 {
8834 if (!pVCpu->iem.s.uRexB)
8835 {
8836 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8837 SET_SS_DEF();
8838 }
8839 else
8840 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8841 }
8842 else
8843 {
8844 uint32_t u32Disp;
8845 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8846 u64EffAddr += (int32_t)u32Disp;
8847 }
8848 break;
8849 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8850 }
8851 break;
8852 }
8853 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8854 }
8855
8856 /* Get and add the displacement. */
8857 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8858 {
8859 case 0:
8860 break;
8861 case 1:
8862 {
8863 int8_t i8Disp;
8864 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8865 u64EffAddr += i8Disp;
8866 break;
8867 }
8868 case 2:
8869 {
8870 uint32_t u32Disp;
8871 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8872 u64EffAddr += (int32_t)u32Disp;
8873 break;
8874 }
8875 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8876 }
8877
8878 }
8879
8880 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8881 *pGCPtrEff = u64EffAddr;
8882 else
8883 {
8884 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8885 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8886 }
8887 }
8888
8889 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8890 return VINF_SUCCESS;
8891}
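/*
 * Editor's note (illustration): a worked 32-bit example for the decoder above.
 * bRm=0x44 (mod=01, rm=100 -> SIB follows, disp8), bSib=0x98 (scale=2,
 * index=011=EBX, base=000=EAX) and disp8=-8 give
 *      GCPtrEff = EAX + (EBX << 2) - 8
 * with the data segment as the default, since none of the SET_SS_DEF() cases
 * are hit for this encoding.
 */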
8892
8893
8894/**
8895 * Calculates the effective address of a ModR/M memory operand.
8896 *
8897 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8898 *
8899 * @return Strict VBox status code.
8900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8901 * @param bRm The ModRM byte.
8902 * @param cbImm The size of any immediate following the
8903 * effective address opcode bytes. Important for
8904 * RIP relative addressing.
8905 * @param pGCPtrEff Where to return the effective address.
8906 * @param offRsp RSP displacement.
8907 */
8908VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8909{
8910    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8911# define SET_SS_DEF() \
8912 do \
8913 { \
8914 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8915 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8916 } while (0)
8917
8918 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8919 {
8920/** @todo Check the effective address size crap! */
8921 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8922 {
8923 uint16_t u16EffAddr;
8924
8925 /* Handle the disp16 form with no registers first. */
8926 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8927 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8928 else
8929 {
8930                /* Get the displacement. */
8931 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8932 {
8933 case 0: u16EffAddr = 0; break;
8934 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8935 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8936 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8937 }
8938
8939 /* Add the base and index registers to the disp. */
8940 switch (bRm & X86_MODRM_RM_MASK)
8941 {
8942 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8943 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8944 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8945 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8946 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8947 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8948 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8949 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8950 }
8951 }
8952
8953 *pGCPtrEff = u16EffAddr;
8954 }
8955 else
8956 {
8957 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8958 uint32_t u32EffAddr;
8959
8960 /* Handle the disp32 form with no registers first. */
8961 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8962 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8963 else
8964 {
8965 /* Get the register (or SIB) value. */
8966 switch ((bRm & X86_MODRM_RM_MASK))
8967 {
8968 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8969 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8970 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8971 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8972 case 4: /* SIB */
8973 {
8974 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8975
8976 /* Get the index and scale it. */
8977 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8978 {
8979 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8980 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8981 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8982 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8983 case 4: u32EffAddr = 0; /*none */ break;
8984 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8985 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8986 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8987 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8988 }
8989 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8990
8991 /* add base */
8992 switch (bSib & X86_SIB_BASE_MASK)
8993 {
8994 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8995 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8996 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8997 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8998 case 4:
8999 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
9000 SET_SS_DEF();
9001 break;
9002 case 5:
9003 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9004 {
9005 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9006 SET_SS_DEF();
9007 }
9008 else
9009 {
9010 uint32_t u32Disp;
9011 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9012 u32EffAddr += u32Disp;
9013 }
9014 break;
9015 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9016 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9017 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9018 }
9019 break;
9020 }
9021 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9022 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9023 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9024 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9025 }
9026
9027 /* Get and add the displacement. */
9028 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9029 {
9030 case 0:
9031 break;
9032 case 1:
9033 {
9034 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9035 u32EffAddr += i8Disp;
9036 break;
9037 }
9038 case 2:
9039 {
9040 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9041 u32EffAddr += u32Disp;
9042 break;
9043 }
9044 default:
9045 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9046 }
9047
9048 }
9049 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9050 *pGCPtrEff = u32EffAddr;
9051 else
9052 {
9053 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9054 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9055 }
9056 }
9057 }
9058 else
9059 {
9060 uint64_t u64EffAddr;
9061
9062 /* Handle the rip+disp32 form with no registers first. */
9063 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9064 {
9065 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9066 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9067 }
9068 else
9069 {
9070 /* Get the register (or SIB) value. */
9071 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9072 {
9073 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9074 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9075 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9076 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9077 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9078 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9079 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9080 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9081 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9082 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9083 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9084 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9085 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9086 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9087 /* SIB */
9088 case 4:
9089 case 12:
9090 {
9091 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9092
9093 /* Get the index and scale it. */
9094 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9095 {
9096 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9097 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9098 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9099 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9100 case 4: u64EffAddr = 0; /*none */ break;
9101 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9102 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9103 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9104 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9105 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9106 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9107 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9108 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9109 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9110 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9111 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9112 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9113 }
9114 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9115
9116 /* add base */
9117 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9118 {
9119 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9120 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9121 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9122 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9123 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
9124 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9125 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9126 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9127 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9128 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9129 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9130 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9131 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9132 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9133 /* complicated encodings */
9134 case 5:
9135 case 13:
9136 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9137 {
9138 if (!pVCpu->iem.s.uRexB)
9139 {
9140 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9141 SET_SS_DEF();
9142 }
9143 else
9144 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9145 }
9146 else
9147 {
9148 uint32_t u32Disp;
9149 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9150 u64EffAddr += (int32_t)u32Disp;
9151 }
9152 break;
9153 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9154 }
9155 break;
9156 }
9157 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9158 }
9159
9160 /* Get and add the displacement. */
9161 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9162 {
9163 case 0:
9164 break;
9165 case 1:
9166 {
9167 int8_t i8Disp;
9168 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9169 u64EffAddr += i8Disp;
9170 break;
9171 }
9172 case 2:
9173 {
9174 uint32_t u32Disp;
9175 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9176 u64EffAddr += (int32_t)u32Disp;
9177 break;
9178 }
9179 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9180 }
9181
9182 }
9183
9184 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9185 *pGCPtrEff = u64EffAddr;
9186 else
9187 {
9188 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9189 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9190 }
9191 }
9192
9193 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
9194 return VINF_SUCCESS;
9195}
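
/*
 * Worked example (illustrative only, not part of the original source): for a
 * 32-bit operand like "mov eax, [ebx + esi*4 + 0x10]" the encoding is
 * ModRM=0x44 (mod=01, reg=000, rm=100 -> SIB follows), SIB=0xB3 (scale field=2,
 * index=esi, base=ebx) and disp8=0x10, so the helper above effectively does:
 *
 *      u32EffAddr   = pVCpu->cpum.GstCtx.esi;      // index register
 *      u32EffAddr <<= 2;                           // scale factor of 4
 *      u32EffAddr  += pVCpu->cpum.GstCtx.ebx;      // base register
 *      u32EffAddr  += 0x10;                        // 8-bit displacement
 *      *pGCPtrEff   = u32EffAddr;
 *
 * The instruction and register choice are made up purely for illustration.
 */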
9196
9197
9198#ifdef IEM_WITH_SETJMP
9199/**
9200 * Calculates the effective address of a ModR/M memory operand.
9201 *
9202 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9203 *
9204 * May longjmp on internal error.
9205 *
9206 * @return The effective address.
9207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9208 * @param bRm The ModRM byte.
9209 * @param cbImm The size of any immediate following the
9210 * effective address opcode bytes. Important for
9211 * RIP relative addressing.
9212 */
9213RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
9214{
9215 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9216# define SET_SS_DEF() \
9217 do \
9218 { \
9219 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9220 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9221 } while (0)
9222
9223 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9224 {
9225/** @todo Check the effective address size crap! */
9226 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9227 {
9228 uint16_t u16EffAddr;
9229
9230 /* Handle the disp16 form with no registers first. */
9231 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9232 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9233 else
9234 {
9235 /* Get the displacement. */
9236 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9237 {
9238 case 0: u16EffAddr = 0; break;
9239 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9240 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9241 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9242 }
9243
9244 /* Add the base and index registers to the disp. */
9245 switch (bRm & X86_MODRM_RM_MASK)
9246 {
9247 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9248 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9249 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9250 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9251 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9252 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9253 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9254 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9255 }
9256 }
9257
9258 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9259 return u16EffAddr;
9260 }
9261
9262 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9263 uint32_t u32EffAddr;
9264
9265 /* Handle the disp32 form with no registers first. */
9266 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9267 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9268 else
9269 {
9270 /* Get the register (or SIB) value. */
9271 switch ((bRm & X86_MODRM_RM_MASK))
9272 {
9273 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9274 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9275 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9276 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9277 case 4: /* SIB */
9278 {
9279 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9280
9281 /* Get the index and scale it. */
9282 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9283 {
9284 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9285 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9286 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9287 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9288 case 4: u32EffAddr = 0; /*none */ break;
9289 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9290 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9291 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9292 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9293 }
9294 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9295
9296 /* add base */
9297 switch (bSib & X86_SIB_BASE_MASK)
9298 {
9299 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9300 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9301 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9302 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9303 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9304 case 5:
9305 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9306 {
9307 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9308 SET_SS_DEF();
9309 }
9310 else
9311 {
9312 uint32_t u32Disp;
9313 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9314 u32EffAddr += u32Disp;
9315 }
9316 break;
9317 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9318 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9319 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9320 }
9321 break;
9322 }
9323 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9324 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9325 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9326 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9327 }
9328
9329 /* Get and add the displacement. */
9330 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9331 {
9332 case 0:
9333 break;
9334 case 1:
9335 {
9336 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9337 u32EffAddr += i8Disp;
9338 break;
9339 }
9340 case 2:
9341 {
9342 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9343 u32EffAddr += u32Disp;
9344 break;
9345 }
9346 default:
9347 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9348 }
9349 }
9350
9351 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9352 {
9353 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9354 return u32EffAddr;
9355 }
9356 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9357 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9358 return u32EffAddr & UINT16_MAX;
9359 }
9360
9361 uint64_t u64EffAddr;
9362
9363 /* Handle the rip+disp32 form with no registers first. */
9364 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9365 {
9366 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9367 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9368 }
9369 else
9370 {
9371 /* Get the register (or SIB) value. */
9372 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9373 {
9374 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9375 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9376 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9377 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9378 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9379 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9380 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9381 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9382 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9383 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9384 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9385 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9386 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9387 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9388 /* SIB */
9389 case 4:
9390 case 12:
9391 {
9392 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9393
9394 /* Get the index and scale it. */
9395 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9396 {
9397 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9398 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9399 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9400 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9401 case 4: u64EffAddr = 0; /*none */ break;
9402 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9403 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9404 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9405 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9406 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9407 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9408 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9409 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9410 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9411 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9412 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9413 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9414 }
9415 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9416
9417 /* add base */
9418 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9419 {
9420 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9421 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9422 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9423 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9424 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9425 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9426 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9427 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9428 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9429 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9430 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9431 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9432 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9433 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9434 /* complicated encodings */
9435 case 5:
9436 case 13:
9437 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9438 {
9439 if (!pVCpu->iem.s.uRexB)
9440 {
9441 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9442 SET_SS_DEF();
9443 }
9444 else
9445 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9446 }
9447 else
9448 {
9449 uint32_t u32Disp;
9450 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9451 u64EffAddr += (int32_t)u32Disp;
9452 }
9453 break;
9454 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9455 }
9456 break;
9457 }
9458 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9459 }
9460
9461 /* Get and add the displacement. */
9462 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9463 {
9464 case 0:
9465 break;
9466 case 1:
9467 {
9468 int8_t i8Disp;
9469 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9470 u64EffAddr += i8Disp;
9471 break;
9472 }
9473 case 2:
9474 {
9475 uint32_t u32Disp;
9476 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9477 u64EffAddr += (int32_t)u32Disp;
9478 break;
9479 }
9480 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9481 }
9482
9483 }
9484
9485 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9486 {
9487 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9488 return u64EffAddr;
9489 }
9490 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9491 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9492 return u64EffAddr & UINT32_MAX;
9493}
9494#endif /* IEM_WITH_SETJMP */
9495
9496/** @} */
9497
9498
9499#ifdef LOG_ENABLED
9500/**
9501 * Logs the current instruction.
9502 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9503 * @param fSameCtx Set if we have the same context information as the VMM,
9504 * clear if we may have already executed an instruction in
9505 * our debug context. When clear, we assume IEMCPU holds
9506 * valid CPU mode info.
9507 *
9508 * The @a fSameCtx parameter is now misleading and obsolete.
9509 * @param pszFunction The IEM function doing the execution.
9510 */
9511static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9512{
9513# ifdef IN_RING3
9514 if (LogIs2Enabled())
9515 {
9516 char szInstr[256];
9517 uint32_t cbInstr = 0;
9518 if (fSameCtx)
9519 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9520 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9521 szInstr, sizeof(szInstr), &cbInstr);
9522 else
9523 {
9524 uint32_t fFlags = 0;
9525 switch (pVCpu->iem.s.enmCpuMode)
9526 {
9527 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9528 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9529 case IEMMODE_16BIT:
9530 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9531 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9532 else
9533 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9534 break;
9535 }
9536 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9537 szInstr, sizeof(szInstr), &cbInstr);
9538 }
9539
9540 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9541 Log2(("**** %s\n"
9542 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9543 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9544 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9545 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9546 " %s\n"
9547 , pszFunction,
9548 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9549 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9550 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9551 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9552 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9553 szInstr));
9554
9555 if (LogIs3Enabled())
9556 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9557 }
9558 else
9559# endif
9560 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9561 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9562 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9563}
9564#endif /* LOG_ENABLED */
9565
9566
9567#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9568/**
9569 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9570 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9571 *
9572 * @returns Modified rcStrict.
9573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9574 * @param rcStrict The instruction execution status.
9575 */
9576static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9577{
9578 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9579 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9580 {
9581 /* VMX preemption timer takes priority over NMI-window exits. */
9582 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9583 {
9584 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9585 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9586 }
9587 /*
9588 * Check remaining intercepts.
9589 *
9590 * NMI-window and Interrupt-window VM-exits.
9591 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9592 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9593 *
9594 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9595 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9596 */
9597 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9598 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9599 && !TRPMHasTrap(pVCpu))
9600 {
9601 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9602 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9603 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9604 {
9605 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9606 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9607 }
9608 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9609 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9610 {
9611 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9612 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9613 }
9614 }
9615 }
9616 /* TPR-below threshold/APIC write has the highest priority. */
9617 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9618 {
9619 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9620 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9621 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9622 }
9623 /* MTF takes priority over VMX-preemption timer. */
9624 else
9625 {
9626 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9627 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9628 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9629 }
9630 return rcStrict;
9631}
9632#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9633
9634
9635/**
9636 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9637 * IEMExecOneWithPrefetchedByPC.
9638 *
9639 * Similar code is found in IEMExecLots.
9640 *
9641 * @return Strict VBox status code.
9642 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9643 * @param fExecuteInhibit If set, execute the instruction following CLI,
9644 * POP SS and MOV SS,GR.
9645 * @param pszFunction The calling function name.
9646 */
9647DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9648{
9649 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9650 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9651 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9652 RT_NOREF_PV(pszFunction);
9653
9654#ifdef IEM_WITH_SETJMP
9655 VBOXSTRICTRC rcStrict;
9656 jmp_buf JmpBuf;
9657 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9658 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9659 if ((rcStrict = setjmp(JmpBuf)) == 0)
9660 {
9661 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9662 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9663 }
9664 else
9665 pVCpu->iem.s.cLongJumps++;
9666 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9667#else
9668 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9669 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9670#endif
9671 if (rcStrict == VINF_SUCCESS)
9672 pVCpu->iem.s.cInstructions++;
9673 if (pVCpu->iem.s.cActiveMappings > 0)
9674 {
9675 Assert(rcStrict != VINF_SUCCESS);
9676 iemMemRollback(pVCpu);
9677 }
9678 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9679 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9680 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9681
9682//#ifdef DEBUG
9683// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9684//#endif
9685
9686#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9687 /*
9688 * Perform any VMX nested-guest instruction boundary actions.
9689 *
9690 * If any of these causes a VM-exit, we must skip executing the next
9691 * instruction (would run into stale page tables). A VM-exit makes sure
9692 * there is no interrupt-inhibition, so that should ensure we don't go
9693 * on to try executing the next instruction. Clearing fExecuteInhibit is
9694 * problematic because of the setjmp/longjmp clobbering above.
9695 */
9696 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9697 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9698 || rcStrict != VINF_SUCCESS)
9699 { /* likely */ }
9700 else
9701 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9702#endif
9703
9704 /* Execute the next instruction as well if a cli, pop ss or
9705 mov ss, Gr has just completed successfully. */
9706 if ( fExecuteInhibit
9707 && rcStrict == VINF_SUCCESS
9708 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9709 && EMIsInhibitInterruptsActive(pVCpu))
9710 {
9711 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9712 if (rcStrict == VINF_SUCCESS)
9713 {
9714#ifdef LOG_ENABLED
9715 iemLogCurInstr(pVCpu, false, pszFunction);
9716#endif
9717#ifdef IEM_WITH_SETJMP
9718 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9719 if ((rcStrict = setjmp(JmpBuf)) == 0)
9720 {
9721 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9722 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9723 }
9724 else
9725 pVCpu->iem.s.cLongJumps++;
9726 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9727#else
9728 IEM_OPCODE_GET_NEXT_U8(&b);
9729 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9730#endif
9731 if (rcStrict == VINF_SUCCESS)
9732 {
9733 pVCpu->iem.s.cInstructions++;
9734#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9735 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9736 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9737 { /* likely */ }
9738 else
9739 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9740#endif
9741 }
9742 if (pVCpu->iem.s.cActiveMappings > 0)
9743 {
9744 Assert(rcStrict != VINF_SUCCESS);
9745 iemMemRollback(pVCpu);
9746 }
9747 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9748 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9749 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9750 }
9751 else if (pVCpu->iem.s.cActiveMappings > 0)
9752 iemMemRollback(pVCpu);
9753 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9754 }
9755
9756 /*
9757 * Return value fiddling, statistics and sanity assertions.
9758 */
9759 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9760
9761 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9762 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9763 return rcStrict;
9764}
9765
9766
9767/**
9768 * Execute one instruction.
9769 *
9770 * @return Strict VBox status code.
9771 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9772 */
9773VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9774{
9775 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9776#ifdef LOG_ENABLED
9777 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9778#endif
9779
9780 /*
9781 * Do the decoding and emulation.
9782 */
9783 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9784 if (rcStrict == VINF_SUCCESS)
9785 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9786 else if (pVCpu->iem.s.cActiveMappings > 0)
9787 iemMemRollback(pVCpu);
9788
9789 if (rcStrict != VINF_SUCCESS)
9790 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9791 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9792 return rcStrict;
9793}
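
/*
 * Usage sketch (illustrative only): a ring-3 caller wanting IEM to interpret
 * exactly one guest instruction would do something along these lines; the
 * surrounding handling is an assumption of this example, only IEMExecOne
 * itself comes from the code above:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          LogFlow(("caller: IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 */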
9794
9795
9796VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9797{
9798 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9799
9800 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9801 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9802 if (rcStrict == VINF_SUCCESS)
9803 {
9804 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9805 if (pcbWritten)
9806 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9807 }
9808 else if (pVCpu->iem.s.cActiveMappings > 0)
9809 iemMemRollback(pVCpu);
9810
9811 return rcStrict;
9812}
9813
9814
9815VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9816 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9817{
9818 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9819
9820 VBOXSTRICTRC rcStrict;
9821 if ( cbOpcodeBytes
9822 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9823 {
9824 iemInitDecoder(pVCpu, false, false);
9825#ifdef IEM_WITH_CODE_TLB
9826 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9827 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9828 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9829 pVCpu->iem.s.offCurInstrStart = 0;
9830 pVCpu->iem.s.offInstrNextByte = 0;
9831#else
9832 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9833 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9834#endif
9835 rcStrict = VINF_SUCCESS;
9836 }
9837 else
9838 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9839 if (rcStrict == VINF_SUCCESS)
9840 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9841 else if (pVCpu->iem.s.cActiveMappings > 0)
9842 iemMemRollback(pVCpu);
9843
9844 return rcStrict;
9845}
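
/*
 * Usage sketch (illustrative only): a caller that already holds the opcode
 * bytes for the current RIP (abInstr and cbInstr are assumptions of this
 * example) can hand them to IEM and avoid a second fetch; if RIP does not
 * match OpcodeBytesPC the function falls back to normal prefetching:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
 *                                                           pVCpu->cpum.GstCtx.rip, abInstr, cbInstr);
 */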
9846
9847
9848VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9849{
9850 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9851
9852 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9853 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9854 if (rcStrict == VINF_SUCCESS)
9855 {
9856 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9857 if (pcbWritten)
9858 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9859 }
9860 else if (pVCpu->iem.s.cActiveMappings > 0)
9861 iemMemRollback(pVCpu);
9862
9863 return rcStrict;
9864}
9865
9866
9867VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9868 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9869{
9870 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9871
9872 VBOXSTRICTRC rcStrict;
9873 if ( cbOpcodeBytes
9874 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9875 {
9876 iemInitDecoder(pVCpu, true, false);
9877#ifdef IEM_WITH_CODE_TLB
9878 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9879 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9880 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9881 pVCpu->iem.s.offCurInstrStart = 0;
9882 pVCpu->iem.s.offInstrNextByte = 0;
9883#else
9884 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9885 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9886#endif
9887 rcStrict = VINF_SUCCESS;
9888 }
9889 else
9890 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9891 if (rcStrict == VINF_SUCCESS)
9892 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9893 else if (pVCpu->iem.s.cActiveMappings > 0)
9894 iemMemRollback(pVCpu);
9895
9896 return rcStrict;
9897}
9898
9899
9900/**
9901 * For debugging DISGetParamSize, may come in handy.
9902 *
9903 * @returns Strict VBox status code.
9904 * @param pVCpu The cross context virtual CPU structure of the
9905 * calling EMT.
9906 * @param pCtxCore The context core structure.
9907 * @param OpcodeBytesPC The PC of the opcode bytes.
9908 * @param pvOpcodeBytes Prefetched opcode bytes.
9909 * @param cbOpcodeBytes Number of prefetched bytes.
9910 * @param pcbWritten Where to return the number of bytes written.
9911 * Optional.
9912 */
9913VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9914 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9915 uint32_t *pcbWritten)
9916{
9917 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9918
9919 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9920 VBOXSTRICTRC rcStrict;
9921 if ( cbOpcodeBytes
9922 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9923 {
9924 iemInitDecoder(pVCpu, true, false);
9925#ifdef IEM_WITH_CODE_TLB
9926 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9927 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9928 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9929 pVCpu->iem.s.offCurInstrStart = 0;
9930 pVCpu->iem.s.offInstrNextByte = 0;
9931#else
9932 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9933 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9934#endif
9935 rcStrict = VINF_SUCCESS;
9936 }
9937 else
9938 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9939 if (rcStrict == VINF_SUCCESS)
9940 {
9941 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9942 if (pcbWritten)
9943 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9944 }
9945 else if (pVCpu->iem.s.cActiveMappings > 0)
9946 iemMemRollback(pVCpu);
9947
9948 return rcStrict;
9949}
9950
9951
9952/**
9953 * For handling split cacheline lock operations when the host has split-lock
9954 * detection enabled.
9955 *
9956 * This will cause the interpreter to disregard the lock prefix and implicit
9957 * locking (xchg).
9958 *
9959 * @returns Strict VBox status code.
9960 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9961 */
9962VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9963{
9964 /*
9965 * Do the decoding and emulation.
9966 */
9967 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9968 if (rcStrict == VINF_SUCCESS)
9969 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9970 else if (pVCpu->iem.s.cActiveMappings > 0)
9971 iemMemRollback(pVCpu);
9972
9973 if (rcStrict != VINF_SUCCESS)
9974 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9975 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9976 return rcStrict;
9977}
9978
9979
9980VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9981{
9982 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9983 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9984
9985 /*
9986 * See if there is an interrupt pending in TRPM, inject it if we can.
9987 */
9988 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9989#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9990 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9991 if (fIntrEnabled)
9992 {
9993 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9994 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9995 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9996 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9997 else
9998 {
9999 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10000 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10001 }
10002 }
10003#else
10004 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10005#endif
10006
10007 /** @todo What if we are injecting an exception and not an interrupt? Is that
10008 * possible here? For now we assert it is indeed only an interrupt. */
10009 if ( fIntrEnabled
10010 && TRPMHasTrap(pVCpu)
10011 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
10012 {
10013 uint8_t u8TrapNo;
10014 TRPMEVENT enmType;
10015 uint32_t uErrCode;
10016 RTGCPTR uCr2;
10017 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
10018 AssertRC(rc2);
10019 Assert(enmType == TRPM_HARDWARE_INT);
10020 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10021 TRPMResetTrap(pVCpu);
10022#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10023 /* Injecting an event may cause a VM-exit. */
10024 if ( rcStrict != VINF_SUCCESS
10025 && rcStrict != VINF_IEM_RAISED_XCPT)
10026 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10027#else
10028 NOREF(rcStrict);
10029#endif
10030 }
10031
10032 /*
10033 * Initial decoder init w/ prefetch, then setup setjmp.
10034 */
10035 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10036 if (rcStrict == VINF_SUCCESS)
10037 {
10038#ifdef IEM_WITH_SETJMP
10039 jmp_buf JmpBuf;
10040 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10041 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10042 pVCpu->iem.s.cActiveMappings = 0;
10043 if ((rcStrict = setjmp(JmpBuf)) == 0)
10044#endif
10045 {
10046 /*
10047 * The run loop. We limit ourselves to the caller-specified maximum number of instructions.
10048 */
10049 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10050 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10051 for (;;)
10052 {
10053 /*
10054 * Log the state.
10055 */
10056#ifdef LOG_ENABLED
10057 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10058#endif
10059
10060 /*
10061 * Do the decoding and emulation.
10062 */
10063 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10064 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10065 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10066 {
10067 Assert(pVCpu->iem.s.cActiveMappings == 0);
10068 pVCpu->iem.s.cInstructions++;
10069
10070#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10071 /* Perform any VMX nested-guest instruction boundary actions. */
10072 uint64_t fCpu = pVCpu->fLocalForcedActions;
10073 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10074 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10075 { /* likely */ }
10076 else
10077 {
10078 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10079 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10080 fCpu = pVCpu->fLocalForcedActions;
10081 else
10082 {
10083 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10084 break;
10085 }
10086 }
10087#endif
10088 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10089 {
10090#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10091 uint64_t fCpu = pVCpu->fLocalForcedActions;
10092#endif
10093 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10094 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10095 | VMCPU_FF_TLB_FLUSH
10096 | VMCPU_FF_INHIBIT_INTERRUPTS
10097 | VMCPU_FF_BLOCK_NMIS
10098 | VMCPU_FF_UNHALT );
10099
10100 if (RT_LIKELY( ( !fCpu
10101 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10102 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10103 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10104 {
10105 if (cMaxInstructionsGccStupidity-- > 0)
10106 {
10107 /* Poll timers every now and then according to the caller's specs. */
10108 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10109 || !TMTimerPollBool(pVM, pVCpu))
10110 {
10111 Assert(pVCpu->iem.s.cActiveMappings == 0);
10112 iemReInitDecoder(pVCpu);
10113 continue;
10114 }
10115 }
10116 }
10117 }
10118 Assert(pVCpu->iem.s.cActiveMappings == 0);
10119 }
10120 else if (pVCpu->iem.s.cActiveMappings > 0)
10121 iemMemRollback(pVCpu);
10122 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10123 break;
10124 }
10125 }
10126#ifdef IEM_WITH_SETJMP
10127 else
10128 {
10129 if (pVCpu->iem.s.cActiveMappings > 0)
10130 iemMemRollback(pVCpu);
10131# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10132 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10133# endif
10134 pVCpu->iem.s.cLongJumps++;
10135 }
10136 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10137#endif
10138
10139 /*
10140 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10141 */
10142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10143 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10144 }
10145 else
10146 {
10147 if (pVCpu->iem.s.cActiveMappings > 0)
10148 iemMemRollback(pVCpu);
10149
10150#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10151 /*
10152 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10153 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10154 */
10155 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10156#endif
10157 }
10158
10159 /*
10160 * Maybe re-enter raw-mode and log.
10161 */
10162 if (rcStrict != VINF_SUCCESS)
10163 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10164 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10165 if (pcInstructions)
10166 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10167 return rcStrict;
10168}
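
/*
 * Usage sketch (illustrative only; the limits are made-up numbers): cPollRate
 * is used as a mask, so it must be a power of two minus one as the assert at
 * the top of IEMExecLots requires.  A caller could run a burst of up to 4096
 * instructions, polling timers roughly every 512 instructions, like this:
 *
 *      uint32_t cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
 */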
10169
10170
10171/**
10172 * Interface used by EMExecuteExec; gathers exit statistics and enforces limits.
10173 *
10174 * @returns Strict VBox status code.
10175 * @param pVCpu The cross context virtual CPU structure.
10176 * @param fWillExit To be defined.
10177 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10178 * @param cMaxInstructions Maximum number of instructions to execute.
10179 * @param cMaxInstructionsWithoutExits
10180 * The max number of instructions without exits.
10181 * @param pStats Where to return statistics.
10182 */
10183VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10184 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10185{
10186 NOREF(fWillExit); /** @todo define flexible exit crits */
10187
10188 /*
10189 * Initialize return stats.
10190 */
10191 pStats->cInstructions = 0;
10192 pStats->cExits = 0;
10193 pStats->cMaxExitDistance = 0;
10194 pStats->cReserved = 0;
10195
10196 /*
10197 * Initial decoder init w/ prefetch, then setup setjmp.
10198 */
10199 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10200 if (rcStrict == VINF_SUCCESS)
10201 {
10202#ifdef IEM_WITH_SETJMP
10203 jmp_buf JmpBuf;
10204 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10205 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10206 pVCpu->iem.s.cActiveMappings = 0;
10207 if ((rcStrict = setjmp(JmpBuf)) == 0)
10208#endif
10209 {
10210#ifdef IN_RING0
10211 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10212#endif
10213 uint32_t cInstructionSinceLastExit = 0;
10214
10215 /*
10216 * The run loop. We limit ourselves to the caller-specified maximum number of instructions.
10217 */
10218 PVM pVM = pVCpu->CTX_SUFF(pVM);
10219 for (;;)
10220 {
10221 /*
10222 * Log the state.
10223 */
10224#ifdef LOG_ENABLED
10225 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10226#endif
10227
10228 /*
10229 * Do the decoding and emulation.
10230 */
10231 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10232
10233 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10234 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10235
10236 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10237 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10238 {
10239 pStats->cExits += 1;
10240 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10241 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10242 cInstructionSinceLastExit = 0;
10243 }
10244
10245 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10246 {
10247 Assert(pVCpu->iem.s.cActiveMappings == 0);
10248 pVCpu->iem.s.cInstructions++;
10249 pStats->cInstructions++;
10250 cInstructionSinceLastExit++;
10251
10252#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10253 /* Perform any VMX nested-guest instruction boundary actions. */
10254 uint64_t fCpu = pVCpu->fLocalForcedActions;
10255 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10256 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10257 { /* likely */ }
10258 else
10259 {
10260 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10261 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10262 fCpu = pVCpu->fLocalForcedActions;
10263 else
10264 {
10265 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10266 break;
10267 }
10268 }
10269#endif
10270 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10271 {
10272#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10273 uint64_t fCpu = pVCpu->fLocalForcedActions;
10274#endif
10275 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10276 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10277 | VMCPU_FF_TLB_FLUSH
10278 | VMCPU_FF_INHIBIT_INTERRUPTS
10279 | VMCPU_FF_BLOCK_NMIS
10280 | VMCPU_FF_UNHALT );
10281 if (RT_LIKELY( ( ( !fCpu
10282 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10283 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10284 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10285 || pStats->cInstructions < cMinInstructions))
10286 {
10287 if (pStats->cInstructions < cMaxInstructions)
10288 {
10289 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10290 {
10291#ifdef IN_RING0
10292 if ( !fCheckPreemptionPending
10293 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10294#endif
10295 {
10296 Assert(pVCpu->iem.s.cActiveMappings == 0);
10297 iemReInitDecoder(pVCpu);
10298 continue;
10299 }
10300#ifdef IN_RING0
10301 rcStrict = VINF_EM_RAW_INTERRUPT;
10302 break;
10303#endif
10304 }
10305 }
10306 }
10307 Assert(!(fCpu & VMCPU_FF_IEM));
10308 }
10309 Assert(pVCpu->iem.s.cActiveMappings == 0);
10310 }
10311 else if (pVCpu->iem.s.cActiveMappings > 0)
10312 iemMemRollback(pVCpu);
10313 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10314 break;
10315 }
10316 }
10317#ifdef IEM_WITH_SETJMP
10318 else
10319 {
10320 if (pVCpu->iem.s.cActiveMappings > 0)
10321 iemMemRollback(pVCpu);
10322 pVCpu->iem.s.cLongJumps++;
10323 }
10324 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10325#endif
10326
10327 /*
10328 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10329 */
10330 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10331 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10332 }
10333 else
10334 {
10335 if (pVCpu->iem.s.cActiveMappings > 0)
10336 iemMemRollback(pVCpu);
10337
10338#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10339 /*
10340 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10341 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10342 */
10343 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10344#endif
10345 }
10346
10347 /*
10348 * Maybe re-enter raw-mode and log.
10349 */
10350 if (rcStrict != VINF_SUCCESS)
10351 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10352 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10353 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10354 return rcStrict;
10355}
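
/*
 * Usage sketch (illustrative only; all limits are made-up numbers): a caller
 * such as EM could let IEM execute until the guest starts exiting too often
 * and then inspect the returned statistics:
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 1 /*cMinInstructions*/,
 *                                              4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/,
 *                                              &Stats);
 *      Log(("IEMExecForExits: ins=%u exits=%u maxdist=%u\n",
 *           Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
 */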
10356
10357
10358/**
10359 * Injects a trap, fault, abort, software interrupt or external interrupt.
10360 *
10361 * The parameter list matches TRPMQueryTrapAll pretty closely.
10362 *
10363 * @returns Strict VBox status code.
10364 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10365 * @param u8TrapNo The trap number.
10366 * @param enmType What type is it (trap/fault/abort), software
10367 * interrupt or hardware interrupt.
10368 * @param uErrCode The error code if applicable.
10369 * @param uCr2 The CR2 value if applicable.
10370 * @param cbInstr The instruction length (only relevant for
10371 * software interrupts).
10372 */
10373VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10374 uint8_t cbInstr)
10375{
10376 iemInitDecoder(pVCpu, false, false);
10377#ifdef DBGFTRACE_ENABLED
10378 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10379 u8TrapNo, enmType, uErrCode, uCr2);
10380#endif
10381
10382 uint32_t fFlags;
10383 switch (enmType)
10384 {
10385 case TRPM_HARDWARE_INT:
10386 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10387 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10388 uErrCode = uCr2 = 0;
10389 break;
10390
10391 case TRPM_SOFTWARE_INT:
10392 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10393 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10394 uErrCode = uCr2 = 0;
10395 break;
10396
10397 case TRPM_TRAP:
10398 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10399 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10400 if (u8TrapNo == X86_XCPT_PF)
10401 fFlags |= IEM_XCPT_FLAGS_CR2;
10402 switch (u8TrapNo)
10403 {
10404 case X86_XCPT_DF:
10405 case X86_XCPT_TS:
10406 case X86_XCPT_NP:
10407 case X86_XCPT_SS:
10408 case X86_XCPT_PF:
10409 case X86_XCPT_AC:
10410 case X86_XCPT_GP:
10411 fFlags |= IEM_XCPT_FLAGS_ERR;
10412 break;
10413 }
10414 break;
10415
10416 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10417 }
10418
10419 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10420
10421 if (pVCpu->iem.s.cActiveMappings > 0)
10422 iemMemRollback(pVCpu);
10423
10424 return rcStrict;
10425}
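
/*
 * Usage sketch (illustrative only): injecting a page fault on the guest's
 * behalf could look roughly like this; the error code bits and GCPtrFault
 * are assumptions of this example:
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                            X86_TRAP_PF_P | X86_TRAP_PF_RW /*uErrCode*/,
 *                                            GCPtrFault /*uCr2*/, 0 /*cbInstr*/);
 */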
10426
10427
10428/**
10429 * Injects the active TRPM event.
10430 *
10431 * @returns Strict VBox status code.
10432 * @param pVCpu The cross context virtual CPU structure.
10433 */
10434VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10435{
10436#ifndef IEM_IMPLEMENTS_TASKSWITCH
10437 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10438#else
10439 uint8_t u8TrapNo;
10440 TRPMEVENT enmType;
10441 uint32_t uErrCode;
10442 RTGCUINTPTR uCr2;
10443 uint8_t cbInstr;
10444 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10445 if (RT_FAILURE(rc))
10446 return rc;
10447
10448 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10449 * ICEBP \#DB injection as a special case. */
10450 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10451#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10452 if (rcStrict == VINF_SVM_VMEXIT)
10453 rcStrict = VINF_SUCCESS;
10454#endif
10455#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10456 if (rcStrict == VINF_VMX_VMEXIT)
10457 rcStrict = VINF_SUCCESS;
10458#endif
10459 /** @todo Are there any other codes that imply the event was successfully
10460 * delivered to the guest? See @bugref{6607}. */
10461 if ( rcStrict == VINF_SUCCESS
10462 || rcStrict == VINF_IEM_RAISED_XCPT)
10463 TRPMResetTrap(pVCpu);
10464
10465 return rcStrict;
10466#endif
10467}
10468
10469
10470VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10471{
10472 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10473 return VERR_NOT_IMPLEMENTED;
10474}
10475
10476
10477VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10478{
10479 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10480 return VERR_NOT_IMPLEMENTED;
10481}
10482
10483
10484#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10485/**
10486 * Executes an IRET instruction with default operand size.
10487 *
10488 * This is for PATM.
10489 *
10490 * @returns VBox status code.
10491 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10492 * @param pCtxCore The register frame.
10493 */
10494VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10495{
10496 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10497
10498 iemCtxCoreToCtx(pCtx, pCtxCore);
10499 iemInitDecoder(pVCpu);
10500 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10501 if (rcStrict == VINF_SUCCESS)
10502 iemCtxToCtxCore(pCtxCore, pCtx);
10503 else
10504 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10505 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10506 return rcStrict;
10507}
10508#endif
10509
10510
10511/**
10512 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10513 *
10514 * This API ASSUMES that the caller has already verified that the guest code is
10515 * allowed to access the I/O port. (The I/O port is in the DX register in the
10516 * guest state.)
10517 *
10518 * @returns Strict VBox status code.
10519 * @param pVCpu The cross context virtual CPU structure.
10520 * @param cbValue The size of the I/O port access (1, 2, or 4).
10521 * @param enmAddrMode The addressing mode.
10522 * @param fRepPrefix Indicates whether a repeat prefix is used
10523 * (doesn't matter which for this instruction).
10524 * @param cbInstr The instruction length in bytes.
10525 * @param iEffSeg The effective segment register number.
10526 * @param fIoChecked Whether the access to the I/O port has been
10527 * checked or not. It's typically checked in the
10528 * HM scenario.
10529 */
10530VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10531 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10532{
10533 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10534 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10535
10536 /*
10537 * State init.
10538 */
10539 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10540
10541 /*
10542 * Switch orgy for getting to the right handler.
10543 */
10544 VBOXSTRICTRC rcStrict;
10545 if (fRepPrefix)
10546 {
10547 switch (enmAddrMode)
10548 {
10549 case IEMMODE_16BIT:
10550 switch (cbValue)
10551 {
10552 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10553 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10554 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10555 default:
10556 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10557 }
10558 break;
10559
10560 case IEMMODE_32BIT:
10561 switch (cbValue)
10562 {
10563 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10564 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10565 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10566 default:
10567 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10568 }
10569 break;
10570
10571 case IEMMODE_64BIT:
10572 switch (cbValue)
10573 {
10574 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10575 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10576 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10577 default:
10578 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10579 }
10580 break;
10581
10582 default:
10583 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10584 }
10585 }
10586 else
10587 {
10588 switch (enmAddrMode)
10589 {
10590 case IEMMODE_16BIT:
10591 switch (cbValue)
10592 {
10593 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10594 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10595 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10596 default:
10597 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10598 }
10599 break;
10600
10601 case IEMMODE_32BIT:
10602 switch (cbValue)
10603 {
10604 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10605 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10606 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10607 default:
10608 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10609 }
10610 break;
10611
10612 case IEMMODE_64BIT:
10613 switch (cbValue)
10614 {
10615 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10616 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10617 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10618 default:
10619 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10620 }
10621 break;
10622
10623 default:
10624 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10625 }
10626 }
10627
10628 if (pVCpu->iem.s.cActiveMappings)
10629 iemMemRollback(pVCpu);
10630
10631 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10632}
10633
10634
10635/**
10636 * Interface for HM and EM for executing string I/O IN (read) instructions.
10637 *
10638 * This API ASSUMES that the caller has already verified that the guest code is
10639 * allowed to access the I/O port. (The I/O port is in the DX register in the
10640 * guest state.)
10641 *
10642 * @returns Strict VBox status code.
10643 * @param pVCpu The cross context virtual CPU structure.
10644 * @param cbValue The size of the I/O port access (1, 2, or 4).
10645 * @param enmAddrMode The addressing mode.
10646 * @param fRepPrefix Indicates whether a repeat prefix is used
10647 * (doesn't matter which for this instruction).
10648 * @param cbInstr The instruction length in bytes.
10649 * @param fIoChecked Whether the access to the I/O port has been
10650 * checked or not. It's typically checked in the
10651 * HM scenario.
10652 */
10653VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10654 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10655{
10656 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10657
10658 /*
10659 * State init.
10660 */
10661 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10662
10663 /*
10664 * Switch orgy for getting to the right handler.
10665 */
10666 VBOXSTRICTRC rcStrict;
10667 if (fRepPrefix)
10668 {
10669 switch (enmAddrMode)
10670 {
10671 case IEMMODE_16BIT:
10672 switch (cbValue)
10673 {
10674 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10675 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10676 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10677 default:
10678 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10679 }
10680 break;
10681
10682 case IEMMODE_32BIT:
10683 switch (cbValue)
10684 {
10685 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10686 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10687 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10688 default:
10689 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10690 }
10691 break;
10692
10693 case IEMMODE_64BIT:
10694 switch (cbValue)
10695 {
10696 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10697 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10698 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10699 default:
10700 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10701 }
10702 break;
10703
10704 default:
10705 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10706 }
10707 }
10708 else
10709 {
10710 switch (enmAddrMode)
10711 {
10712 case IEMMODE_16BIT:
10713 switch (cbValue)
10714 {
10715 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10716 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10717 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10718 default:
10719 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10720 }
10721 break;
10722
10723 case IEMMODE_32BIT:
10724 switch (cbValue)
10725 {
10726 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10727 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10728 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10729 default:
10730 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10731 }
10732 break;
10733
10734 case IEMMODE_64BIT:
10735 switch (cbValue)
10736 {
10737 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10738 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10739 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10740 default:
10741 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10742 }
10743 break;
10744
10745 default:
10746 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10747 }
10748 }
10749
10750 if ( pVCpu->iem.s.cActiveMappings == 0
10751 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10752 { /* likely */ }
10753 else
10754 {
10755 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10756 iemMemRollback(pVCpu);
10757 }
10758 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10759}
10760
10761
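/*
 * Illustrative usage sketch: a caller that has already validated I/O-port
 * access forwarding a string I/O instruction to IEM.  The helper name
 * hmExampleHandleStringIo is hypothetical, and the operand size, addressing
 * mode, REP prefix and instruction length are placeholders a real caller
 * would take from its decoded exit information; only the two APIs above are
 * from this file.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC hmExampleHandleStringIo(PVMCPUCC pVCpu, bool fWrite)
{
    uint8_t const cbValue     = 2;              /* word-sized accesses (placeholder) */
    IEMMODE const enmAddrMode = IEMMODE_32BIT;  /* placeholder */
    bool const    fRepPrefix  = true;           /* placeholder */
    uint8_t const cbInstr     = 2;              /* e.g. REP OUTSB / REP INSB (placeholder) */
    if (fWrite)
        /* OUTS reads from iEffSeg:[e/rsi]; DS unless a segment override was used. */
        return IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRepPrefix, cbInstr,
                                    X86_SREG_DS, true /*fIoChecked*/);
    /* INS always writes to ES:[e/rdi], so there is no segment parameter. */
    return IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRepPrefix, cbInstr,
                               true /*fIoChecked*/);
}
#endif
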
10762/**
10763 * Interface for raw-mode to execute an OUT instruction.
10764 *
10765 * @returns Strict VBox status code.
10766 * @param pVCpu The cross context virtual CPU structure.
10767 * @param cbInstr The instruction length in bytes.
10768 * @param u16Port The port to write to.
10769 * @param fImm Whether the port is specified using an immediate operand or
10770 * using the implicit DX register.
10771 * @param cbReg The register size.
10772 *
10773 * @remarks In ring-0 not all of the state needs to be synced in.
10774 */
10775VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10776{
10777 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10778 Assert(cbReg <= 4 && cbReg != 3);
10779
10780 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10781 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10782 Assert(!pVCpu->iem.s.cActiveMappings);
10783 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10784}
10785
10786
10787/**
10788 * Interface for raw-mode to execute an IN instruction.
10789 *
10790 * @returns Strict VBox status code.
10791 * @param pVCpu The cross context virtual CPU structure.
10792 * @param cbInstr The instruction length in bytes.
10793 * @param u16Port The port to read from.
10794 * @param fImm Whether the port is specified using an immediate operand or
10795 * using the implicit DX.
10796 * @param cbReg The register size.
10797 */
10798VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10799{
10800 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10801 Assert(cbReg <= 4 && cbReg != 3);
10802
10803 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10804 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10805 Assert(!pVCpu->iem.s.cActiveMappings);
10806 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10807}
10808
10809
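/*
 * Illustrative usage sketch: forwarding a trapped single IN/OUT to the two
 * APIs above.  The helper name hmExampleHandlePortIo is hypothetical, and the
 * port number, access size and instruction length are placeholders a real
 * caller would take from its decoded exit information.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC hmExampleHandlePortIo(PVMCPUCC pVCpu, bool fWrite)
{
    uint16_t const u16Port = 0x60;  /* placeholder port */
    uint8_t const  cbReg   = 1;     /* byte access */
    uint8_t const  cbInstr = 1;     /* IN AL,DX / OUT DX,AL without prefixes */
    if (fWrite)
        return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false /*fImm*/, cbReg);
    return IEMExecDecodedIn(pVCpu, cbInstr, u16Port, false /*fImm*/, cbReg);
}
#endif
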
10810/**
10811 * Interface for HM and EM to write to a CRx register.
10812 *
10813 * @returns Strict VBox status code.
10814 * @param pVCpu The cross context virtual CPU structure.
10815 * @param cbInstr The instruction length in bytes.
10816 * @param iCrReg The control register number (destination).
10817 * @param iGReg The general purpose register number (source).
10818 *
10819 * @remarks In ring-0 not all of the state needs to be synced in.
10820 */
10821VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10822{
10823 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10824 Assert(iCrReg < 16);
10825 Assert(iGReg < 16);
10826
10827 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10828 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10829 Assert(!pVCpu->iem.s.cActiveMappings);
10830 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10831}
10832
10833
10834/**
10835 * Interface for HM and EM to read from a CRx register.
10836 *
10837 * @returns Strict VBox status code.
10838 * @param pVCpu The cross context virtual CPU structure.
10839 * @param cbInstr The instruction length in bytes.
10840 * @param iGReg The general purpose register number (destination).
10841 * @param iCrReg The control register number (source).
10842 *
10843 * @remarks In ring-0 not all of the state needs to be synced in.
10844 */
10845VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10846{
10847 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10848 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10849 | CPUMCTX_EXTRN_APIC_TPR);
10850 Assert(iCrReg < 16);
10851 Assert(iGReg < 16);
10852
10853 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10854 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10855 Assert(!pVCpu->iem.s.cActiveMappings);
10856 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10857}
10858
10859
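/*
 * Illustrative usage sketch: emulating a trapped MOV CRx access via the two
 * APIs above.  The helper name hmExampleHandleCrAccess is hypothetical, and
 * the register indices are placeholders a real caller would decode from its
 * exit qualification.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC hmExampleHandleCrAccess(PVMCPUCC pVCpu, bool fWrite)
{
    uint8_t const cbInstr = 3;  /* 0F 20/22 /r without prefixes */
    uint8_t const iCrReg  = 4;  /* CR4 (placeholder) */
    uint8_t const iGReg   = 0;  /* xAX (placeholder) */
    if (fWrite)  /* guest writes the control register: MOV CRx, GReg */
        return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
    /* guest reads the control register: MOV GReg, CRx */
    return IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
}
#endif
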
10860/**
10861 * Interface for HM and EM to clear the CR0[TS] bit.
10862 *
10863 * @returns Strict VBox status code.
10864 * @param pVCpu The cross context virtual CPU structure.
10865 * @param cbInstr The instruction length in bytes.
10866 *
10867 * @remarks In ring-0 not all of the state needs to be synced in.
10868 */
10869VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10870{
10871 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10872
10873 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10874 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10875 Assert(!pVCpu->iem.s.cActiveMappings);
10876 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10877}
10878
10879
10880/**
10881 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10882 *
10883 * @returns Strict VBox status code.
10884 * @param pVCpu The cross context virtual CPU structure.
10885 * @param cbInstr The instruction length in bytes.
10886 * @param uValue The value to load into CR0.
10887 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10888 * memory operand. Otherwise pass NIL_RTGCPTR.
10889 *
10890 * @remarks In ring-0 not all of the state needs to be synced in.
10891 */
10892VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10893{
10894 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10895
10896 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10897 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10898 Assert(!pVCpu->iem.s.cActiveMappings);
10899 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10900}
10901
10902
10903/**
10904 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10905 *
10906 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10907 *
10908 * @returns Strict VBox status code.
10909 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10910 * @param cbInstr The instruction length in bytes.
10911 * @remarks In ring-0 not all of the state needs to be synced in.
10912 * @thread EMT(pVCpu)
10913 */
10914VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10915{
10916 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10917
10918 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10919 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10920 Assert(!pVCpu->iem.s.cActiveMappings);
10921 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10922}
10923
10924
10925/**
10926 * Interface for HM and EM to emulate the WBINVD instruction.
10927 *
10928 * @returns Strict VBox status code.
10929 * @param pVCpu The cross context virtual CPU structure.
10930 * @param cbInstr The instruction length in bytes.
10931 *
10932 * @remarks In ring-0 not all of the state needs to be synced in.
10933 */
10934VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10935{
10936 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10937
10938 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10939 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10940 Assert(!pVCpu->iem.s.cActiveMappings);
10941 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10942}
10943
10944
10945/**
10946 * Interface for HM and EM to emulate the INVD instruction.
10947 *
10948 * @returns Strict VBox status code.
10949 * @param pVCpu The cross context virtual CPU structure.
10950 * @param cbInstr The instruction length in bytes.
10951 *
10952 * @remarks In ring-0 not all of the state needs to be synced in.
10953 */
10954VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10955{
10956 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10957
10958 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10959 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10960 Assert(!pVCpu->iem.s.cActiveMappings);
10961 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10962}
10963
10964
10965/**
10966 * Interface for HM and EM to emulate the INVLPG instruction.
10967 *
10968 * @returns Strict VBox status code.
10969 * @retval VINF_PGM_SYNC_CR3
10970 *
10971 * @param pVCpu The cross context virtual CPU structure.
10972 * @param cbInstr The instruction length in bytes.
10973 * @param GCPtrPage The effective address of the page to invalidate.
10974 *
10975 * @remarks In ring-0 not all of the state needs to be synced in.
10976 */
10977VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10978{
10979 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10980
10981 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10982 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10983 Assert(!pVCpu->iem.s.cActiveMappings);
10984 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10985}
10986
10987
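/*
 * Illustrative usage sketch: handling an INVLPG intercept with the API above.
 * The helper name hmExampleHandleInvlpg is hypothetical and the instruction
 * length is a placeholder; the remark on VINF_PGM_SYNC_CR3 only restates the
 * @retval documentation above.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC hmExampleHandleInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    uint8_t const cbInstr = 3;  /* 0F 01 /7 with a simple memory operand (placeholder) */
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    /* VINF_PGM_SYNC_CR3 is passed back up so the caller's normal paging
       synchronization handling gets to run. */
    return rcStrict;
}
#endif
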
10988/**
10989 * Interface for HM and EM to emulate the INVPCID instruction.
10990 *
10991 * @returns Strict VBox status code.
10992 * @retval VINF_PGM_SYNC_CR3
10993 *
10994 * @param pVCpu The cross context virtual CPU structure.
10995 * @param cbInstr The instruction length in bytes.
10996 * @param iEffSeg The effective segment register.
10997 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10998 * @param uType The invalidation type.
10999 *
11000 * @remarks In ring-0 not all of the state needs to be synced in.
11001 */
11002VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11003 uint64_t uType)
11004{
11005 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11006
11007 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11008 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11009 Assert(!pVCpu->iem.s.cActiveMappings);
11010 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11011}
11012
11013
11014/**
11015 * Interface for HM and EM to emulate the CPUID instruction.
11016 *
11017 * @returns Strict VBox status code.
11018 *
11019 * @param pVCpu The cross context virtual CPU structure.
11020 * @param cbInstr The instruction length in bytes.
11021 *
11022 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
11023 */
11024VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11025{
11026 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11027 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11028
11029 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11030 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11031 Assert(!pVCpu->iem.s.cActiveMappings);
11032 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11033}
11034
11035
11036/**
11037 * Interface for HM and EM to emulate the RDPMC instruction.
11038 *
11039 * @returns Strict VBox status code.
11040 *
11041 * @param pVCpu The cross context virtual CPU structure.
11042 * @param cbInstr The instruction length in bytes.
11043 *
11044 * @remarks Not all of the state needs to be synced in.
11045 */
11046VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11047{
11048 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11049 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11050
11051 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11052 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11053 Assert(!pVCpu->iem.s.cActiveMappings);
11054 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11055}
11056
11057
11058/**
11059 * Interface for HM and EM to emulate the RDTSC instruction.
11060 *
11061 * @returns Strict VBox status code.
11062 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11063 *
11064 * @param pVCpu The cross context virtual CPU structure.
11065 * @param cbInstr The instruction length in bytes.
11066 *
11067 * @remarks Not all of the state needs to be synced in.
11068 */
11069VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11070{
11071 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11072 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11073
11074 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11075 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11076 Assert(!pVCpu->iem.s.cActiveMappings);
11077 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11078}
11079
11080
11081/**
11082 * Interface for HM and EM to emulate the RDTSCP instruction.
11083 *
11084 * @returns Strict VBox status code.
11085 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11086 *
11087 * @param pVCpu The cross context virtual CPU structure.
11088 * @param cbInstr The instruction length in bytes.
11089 *
11090 * @remarks Not all of the state needs to be synced in. Recommended
11091 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11092 */
11093VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11094{
11095 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11096 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11097
11098 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11099 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11100 Assert(!pVCpu->iem.s.cActiveMappings);
11101 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11102}
11103
11104
11105/**
11106 * Interface for HM and EM to emulate the RDMSR instruction.
11107 *
11108 * @returns Strict VBox status code.
11109 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11110 *
11111 * @param pVCpu The cross context virtual CPU structure.
11112 * @param cbInstr The instruction length in bytes.
11113 *
11114 * @remarks Not all of the state needs to be synced in. Requires RCX and
11115 * (currently) all MSRs.
11116 */
11117VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11118{
11119 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11120 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11121
11122 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11123 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11124 Assert(!pVCpu->iem.s.cActiveMappings);
11125 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11126}
11127
11128
11129/**
11130 * Interface for HM and EM to emulate the WRMSR instruction.
11131 *
11132 * @returns Strict VBox status code.
11133 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11134 *
11135 * @param pVCpu The cross context virtual CPU structure.
11136 * @param cbInstr The instruction length in bytes.
11137 *
11138 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11139 * and (currently) all MSRs.
11140 */
11141VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11142{
11143 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11144 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11145 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11146
11147 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11148 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11149 Assert(!pVCpu->iem.s.cActiveMappings);
11150 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11151}
11152
11153
11154/**
11155 * Interface for HM and EM to emulate the MONITOR instruction.
11156 *
11157 * @returns Strict VBox status code.
11158 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11159 *
11160 * @param pVCpu The cross context virtual CPU structure.
11161 * @param cbInstr The instruction length in bytes.
11162 *
11163 * @remarks Not all of the state needs to be synced in.
11164 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11165 * are used.
11166 */
11167VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11168{
11169 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11170 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11171
11172 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11174 Assert(!pVCpu->iem.s.cActiveMappings);
11175 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11176}
11177
11178
11179/**
11180 * Interface for HM and EM to emulate the MWAIT instruction.
11181 *
11182 * @returns Strict VBox status code.
11183 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11184 *
11185 * @param pVCpu The cross context virtual CPU structure.
11186 * @param cbInstr The instruction length in bytes.
11187 *
11188 * @remarks Not all of the state needs to be synced in.
11189 */
11190VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11191{
11192 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11193 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11194
11195 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11196 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11197 Assert(!pVCpu->iem.s.cActiveMappings);
11198 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11199}
11200
11201
11202/**
11203 * Interface for HM and EM to emulate the HLT instruction.
11204 *
11205 * @returns Strict VBox status code.
11206 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11207 *
11208 * @param pVCpu The cross context virtual CPU structure.
11209 * @param cbInstr The instruction length in bytes.
11210 *
11211 * @remarks Not all of the state needs to be synced in.
11212 */
11213VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11214{
11215 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11216
11217 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11218 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11219 Assert(!pVCpu->iem.s.cActiveMappings);
11220 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11221}
11222
11223
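/*
 * Illustrative usage sketch: a simplified dispatcher showing how several of
 * the "decoded" wrappers above (CPUID, RDTSC, RDMSR, WRMSR, HLT and friends)
 * are driven from an exit reason plus the exit instruction length.  The
 * EXAMPLEEXIT type and hmExampleDispatchExit helper are hypothetical and
 * invented for this sketch; real callers use their own VMX/SVM exit codes.
 */
#if 0 /* illustrative sketch only */
typedef enum EXAMPLEEXIT
{
    EXAMPLE_EXIT_CPUID = 1,
    EXAMPLE_EXIT_RDTSC,
    EXAMPLE_EXIT_RDMSR,
    EXAMPLE_EXIT_WRMSR,
    EXAMPLE_EXIT_HLT
} EXAMPLEEXIT;

static VBOXSTRICTRC hmExampleDispatchExit(PVMCPUCC pVCpu, EXAMPLEEXIT enmExit, uint8_t cbInstr)
{
    switch (enmExit)
    {
        case EXAMPLE_EXIT_CPUID: return IEMExecDecodedCpuid(pVCpu, cbInstr);
        case EXAMPLE_EXIT_RDTSC: return IEMExecDecodedRdtsc(pVCpu, cbInstr);
        case EXAMPLE_EXIT_RDMSR: return IEMExecDecodedRdmsr(pVCpu, cbInstr);
        case EXAMPLE_EXIT_WRMSR: return IEMExecDecodedWrmsr(pVCpu, cbInstr);
        case EXAMPLE_EXIT_HLT:   return IEMExecDecodedHlt(pVCpu, cbInstr);
        default:
            AssertMsgFailedReturn(("enmExit=%d\n", enmExit), VERR_NOT_IMPLEMENTED);
    }
}
#endif
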
11224/**
11225 * Checks if IEM is in the process of delivering an event (interrupt or
11226 * exception).
11227 *
11228 * @returns true if we're in the process of raising an interrupt or exception,
11229 * false otherwise.
11230 * @param pVCpu The cross context virtual CPU structure.
11231 * @param puVector Where to store the vector associated with the
11232 * currently delivered event, optional.
11233 * @param pfFlags Where to store the event delivery flags (see
11234 * IEM_XCPT_FLAGS_XXX), optional.
11235 * @param puErr Where to store the error code associated with the
11236 * event, optional.
11237 * @param puCr2 Where to store the CR2 associated with the event,
11238 * optional.
11239 * @remarks The caller should check the flags to determine if the error code and
11240 * CR2 are valid for the event.
11241 */
11242VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11243{
11244 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11245 if (fRaisingXcpt)
11246 {
11247 if (puVector)
11248 *puVector = pVCpu->iem.s.uCurXcpt;
11249 if (pfFlags)
11250 *pfFlags = pVCpu->iem.s.fCurXcpt;
11251 if (puErr)
11252 *puErr = pVCpu->iem.s.uCurXcptErr;
11253 if (puCr2)
11254 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11255 }
11256 return fRaisingXcpt;
11257}
11258
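/*
 * Illustrative usage sketch: querying the event-delivery state with the API
 * above, e.g. when deciding how to report a fault that occurred while an
 * event was being delivered.  The helper name hmExampleLogCurrentXcpt is
 * hypothetical.
 */
#if 0 /* illustrative sketch only */
static void hmExampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("IEM is delivering vector %#x (flags=%#x err=%#x cr2=%#RX64)\n",
             uVector, fFlags, uErr, uCr2));
}
#endif
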
11259#ifdef IN_RING3
11260
11261/**
11262 * Handles the unlikely and probably fatal merge cases.
11263 *
11264 * @returns Merged status code.
11265 * @param rcStrict Current EM status code.
11266 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11267 * with @a rcStrict.
11268 * @param iMemMap The memory mapping index. For error reporting only.
11269 * @param pVCpu The cross context virtual CPU structure of the calling
11270 * thread, for error reporting only.
11271 */
11272DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11273 unsigned iMemMap, PVMCPUCC pVCpu)
11274{
11275 if (RT_FAILURE_NP(rcStrict))
11276 return rcStrict;
11277
11278 if (RT_FAILURE_NP(rcStrictCommit))
11279 return rcStrictCommit;
11280
11281 if (rcStrict == rcStrictCommit)
11282 return rcStrictCommit;
11283
11284 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11285 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11286 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11287 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11288 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11289 return VERR_IOM_FF_STATUS_IPE;
11290}
11291
11292
11293/**
11294 * Helper for IOMR3ProcessForceFlag.
11295 *
11296 * @returns Merged status code.
11297 * @param rcStrict Current EM status code.
11298 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11299 * with @a rcStrict.
11300 * @param iMemMap The memory mapping index. For error reporting only.
11301 * @param pVCpu The cross context virtual CPU structure of the calling
11302 * thread, for error reporting only.
11303 */
11304DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11305{
11306 /* Simple. */
11307 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11308 return rcStrictCommit;
11309
11310 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11311 return rcStrict;
11312
11313 /* EM scheduling status codes. */
11314 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11315 && rcStrict <= VINF_EM_LAST))
11316 {
11317 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11318 && rcStrictCommit <= VINF_EM_LAST))
11319 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11320 }
11321
11322 /* Unlikely */
11323 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11324}
11325
11326
11327/**
11328 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11329 *
11330 * @returns Merge between @a rcStrict and what the commit operation returned.
11331 * @param pVM The cross context VM structure.
11332 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11333 * @param rcStrict The status code returned by ring-0 or raw-mode.
11334 */
11335VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11336{
11337 /*
11338 * Reset the pending commit.
11339 */
11340 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11341 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11342 ("%#x %#x %#x\n",
11343 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11344 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11345
11346 /*
11347 * Commit the pending bounce buffers (usually just one).
11348 */
11349 unsigned cBufs = 0;
11350 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11351 while (iMemMap-- > 0)
11352 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11353 {
11354 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11355 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11356 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11357
11358 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11359 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11360 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11361
11362 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11363 {
11364 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11365 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11366 pbBuf,
11367 cbFirst,
11368 PGMACCESSORIGIN_IEM);
11369 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11370 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11371 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11372 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11373 }
11374
11375 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11376 {
11377 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11378 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11379 pbBuf + cbFirst,
11380 cbSecond,
11381 PGMACCESSORIGIN_IEM);
11382 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11383 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11384 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11385 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11386 }
11387 cBufs++;
11388 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11389 }
11390
11391 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11392 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11393 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11394 pVCpu->iem.s.cActiveMappings = 0;
11395 return rcStrict;
11396}
11397
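/*
 * Illustrative usage sketch: how a ring-3 execution loop reacts to
 * VMCPU_FF_IEM after coming back from ring-0/raw-mode.  The helper name
 * emExampleProcessIemForceFlag is hypothetical; only the force-flag check and
 * IEMR3ProcessForceFlag are taken from the code above.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC emExampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Commit pending IEM bounce-buffer writes and merge the commit status
       with the status we already have. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif
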
11398#endif /* IN_RING3 */
11399