VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@103665

Last change on this file was r103665, checked in by vboxsync on 2024-03-04

VMM/IEM: Native translation of IEM_MC_MAYBE_RAISE_FPU_XCPT() body, bugref:10371

File size: 449.5 KB
1/* $Id: IEMAll.cpp 103665 2024-03-04 12:50:11Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with the speed goal, as the disassembler chews things a bit too much
47 * while leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there until we
63 * leave IEM, because the calling conventions have declared an all-year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The "IEM_MEM" log group covers most of the memory-related detail logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
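/*
 * Illustrative use of the level assignments above (the group is selected via
 * LOG_GROUP): an IEM TLB flush uses Log10 in the main "IEM" group, as done
 * further down in this file, while a stack read in the "IEM_MEM" group would
 * use Log9, e.g. (hypothetical call site, names for illustration only):
 *
 *   Log10(("IEMTlbInvalidateAll\n"));
 *   Log9(("iemMemStackPopU32: %RGv -> %#x\n", GCPtrStack, u32Value));
 */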
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
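/*
 * For illustration: these classes feed the double fault decision. Combining two
 * contributory exceptions, or a page fault followed by another page fault or a
 * contributory exception, escalates to #DF, while benign exceptions are simply
 * delivered one after the other (per the Intel/AMD exception classification).
 */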
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
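/*
 * Worked example (assuming the standard x86 DR7 layout): a guest DR7 of
 * 0x00000001 has L0 set with R/W0 = 00b (instruction breakpoint), so the macro
 * adds IEM_F_PENDING_BRK_INSTR; a DR7 of 0x00010002 has G0 set with
 * R/W0 = 01b (data write), which adds IEM_F_PENDING_BRK_DATA instead.
 */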
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state for the 2nd+ loops of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetch opcodes the first time when starting executing.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
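/*
 * Sizing example: in 64-bit mode with RIP ending at page offset 0xffa, the
 * initial cbToTryRead is GUEST_PAGE_SIZE - 0xffa = 6 bytes; it is then clamped
 * to the bytes left on the page and to sizeof(abOpcode), so only the first
 * page is prefetched here and any cross-page tail is left to
 * iemOpcodeFetchMoreBytes.
 */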
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
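/*
 * Illustrative sketch of the lazy invalidation scheme used above: a TLB entry
 * tag is the page number ORed with the revision that was current when the
 * entry was inserted, so a lookup along the lines of
 *
 *   if (pTlb->aEntries[idx].uTag == (IEMTLB_CALC_TAG_NO_REV(GCPtr) | pTlb->uTlbRevision))
 *       ...  // hit
 *
 * only matches entries inserted since the last revision bump. Bumping
 * uTlbRevision therefore invalidates everything without touching the array,
 * and the tags are only cleared explicitly when the revision counter wraps.
 */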
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs the slow way following a revision rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
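/*
 * The physical revision works the same way as the virtual one above, but lives
 * in the IEMTLBE_F_PHYS_REV bits of each entry's fFlagsAndPhysRev, so a cached
 * mapping is only reused while
 * (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == uTlbPhysRev holds; bumping
 * uTlbPhysRev forces the next use of every entry back through
 * PGMPhysIemGCPhys2PtrNoLock, and IEMTlbInvalidateAllPhysicalSlow handles the
 * wrap-around case.
 */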
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller != pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
831 * failure and jumps.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundary (e.g. cross page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 *
839 * @param pVCpu The cross context virtual CPU structure of the
840 * calling thread.
841 * @param pvDst Where to return the bytes.
842 * @param cbDst Number of bytes to read. A value of zero is
843 * allowed for initializing pbInstrBuf (the
844 * recompiler does this). In this case it is best
845 * to set pbInstrBuf to NULL prior to the call.
846 */
847void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
848{
849# ifdef IN_RING3
850 for (;;)
851 {
852 Assert(cbDst <= 8);
853 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
854
855 /*
856 * We might have a partial buffer match, deal with that first to make the
857 * rest simpler. This is the first part of the cross page/buffer case.
858 */
859 if (pVCpu->iem.s.pbInstrBuf != NULL)
860 {
861 if (offBuf < pVCpu->iem.s.cbInstrBuf)
862 {
863 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
864 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
865 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
866
867 cbDst -= cbCopy;
868 pvDst = (uint8_t *)pvDst + cbCopy;
869 offBuf += cbCopy;
870 pVCpu->iem.s.offInstrNextByte = offBuf;
871 }
872 }
873
874 /*
875 * Check segment limit, figuring how much we're allowed to access at this point.
876 *
877 * We will fault immediately if RIP is past the segment limit / in non-canonical
878 * territory. If we do continue, there are one or more bytes to read before we
879 * end up in trouble and we need to do that first before faulting.
880 */
881 RTGCPTR GCPtrFirst;
882 uint32_t cbMaxRead;
883 if (IEM_IS_64BIT_CODE(pVCpu))
884 {
885 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
886 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
887 { /* likely */ }
888 else
889 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
890 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
891 }
892 else
893 {
894 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
895 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
896 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
897 { /* likely */ }
898 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
899 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
900 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
901 if (cbMaxRead != 0)
902 { /* likely */ }
903 else
904 {
905 /* Overflowed because address is 0 and limit is max. */
906 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
907 cbMaxRead = X86_PAGE_SIZE;
908 }
909 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
910 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
911 if (cbMaxRead2 < cbMaxRead)
912 cbMaxRead = cbMaxRead2;
913 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
914 }
915
916 /*
917 * Get the TLB entry for this piece of code.
918 */
919 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
920 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
921 if (pTlbe->uTag == uTag)
922 {
923 /* likely when executing lots of code, otherwise unlikely */
924# ifdef VBOX_WITH_STATISTICS
925 pVCpu->iem.s.CodeTlb.cTlbHits++;
926# endif
927 }
928 else
929 {
930 pVCpu->iem.s.CodeTlb.cTlbMisses++;
931 PGMPTWALK Walk;
932 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
933 if (RT_FAILURE(rc))
934 {
935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
936 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
937 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
938#endif
939 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
940 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
941 }
942
943 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
944 Assert(Walk.fSucceeded);
945 pTlbe->uTag = uTag;
946 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
947 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
948 pTlbe->GCPhys = Walk.GCPhys;
949 pTlbe->pbMappingR3 = NULL;
950 }
951
952 /*
953 * Check TLB page table level access flags.
954 */
955 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
956 {
957 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
958 {
959 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
960 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
961 }
962 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
963 {
964 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
965 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
966 }
967 }
968
969 /*
970 * Set the accessed flags.
971 * ASSUMES this is set when the address is translated rather than on commit...
972 */
973 /** @todo testcase: check when the A bit is actually set by the CPU for code. */
974 if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
975 {
976 int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
977 AssertRC(rc2);
978 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
979 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
980 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
981 }
982
983 /*
984 * Look up the physical page info if necessary.
985 */
986 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
987 { /* not necessary */ }
988 else
989 {
990 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
991 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
992 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
993 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
994 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
995 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
996 { /* likely */ }
997 else
998 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
999 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1000 | IEMTLBE_F_NO_MAPPINGR3
1001 | IEMTLBE_F_PG_NO_READ
1002 | IEMTLBE_F_PG_NO_WRITE
1003 | IEMTLBE_F_PG_UNASSIGNED
1004 | IEMTLBE_F_PG_CODE_PAGE);
1005 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1006 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1007 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1008 }
1009
1010# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1011 /*
1012 * Try do a direct read using the pbMappingR3 pointer.
1013 */
1014 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1015 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1016 {
1017 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1018 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1019 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1020 {
1021 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1022 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1023 }
1024 else
1025 {
1026 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1027 if (cbInstr + (uint32_t)cbDst <= 15)
1028 {
1029 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1030 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1031 }
1032 else
1033 {
1034 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1035 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1036 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1037 }
1038 }
1039 if (cbDst <= cbMaxRead)
1040 {
1041 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1042 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1043
1044 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1045 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1046 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1047 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1048 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1049 return;
1050 }
1051 pVCpu->iem.s.pbInstrBuf = NULL;
1052
1053 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1054 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1055 }
1056# else
1057# error "refactor as needed"
1058 /*
1059 * If there is no special read handling, we can read a bit more and
1060 * put it in the prefetch buffer.
1061 */
1062 if ( cbDst < cbMaxRead
1063 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1064 {
1065 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1066 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1068 { /* likely */ }
1069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1070 {
1071 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1072 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1074 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1075 }
1076 else
1077 {
1078 Log((RT_SUCCESS(rcStrict)
1079 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1080 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1081 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1082 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1083 }
1084 }
1085# endif
1086 /*
1087 * Special read handling, so only read exactly what's needed.
1088 * This is a highly unlikely scenario.
1089 */
1090 else
1091 {
1092 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1093
1094 /* Check instruction length. */
1095 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1096 if (RT_LIKELY(cbInstr + cbDst <= 15))
1097 { /* likely */ }
1098 else
1099 {
1100 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1102 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1103 }
1104
1105 /* Do the reading. */
1106 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1107 if (cbToRead > 0)
1108 {
1109 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1110 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1111 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1112 { /* likely */ }
1113 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1114 {
1115 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1116 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1117 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1118 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1119 }
1120 else
1121 {
1122 Log((RT_SUCCESS(rcStrict)
1123 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1124 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1125 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1126 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1127 }
1128 }
1129
1130 /* Update the state and probably return. */
1131 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1132 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1133 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1134
1135 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1136 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1137 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1138 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1139 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1140 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1141 pVCpu->iem.s.pbInstrBuf = NULL;
1142 if (cbToRead == cbDst)
1143 return;
1144 }
1145
1146 /*
1147 * More to read, loop.
1148 */
1149 cbDst -= cbMaxRead;
1150 pvDst = (uint8_t *)pvDst + cbMaxRead;
1151 }
1152# else /* !IN_RING3 */
1153 RT_NOREF(pvDst, cbDst);
1154 if (pvDst || cbDst)
1155 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1156# endif /* !IN_RING3 */
1157}
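/*
 * Note on the 15-byte checks above: they enforce the architectural maximum x86
 * instruction length, so an instruction that would exceed 15 bytes raises
 * #GP(0) just like on real hardware.
 */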
1158
1159#else /* !IEM_WITH_CODE_TLB */
1160
1161/**
1162 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1163 * exception if it fails.
1164 *
1165 * @returns Strict VBox status code.
1166 * @param pVCpu The cross context virtual CPU structure of the
1167 * calling thread.
1168 * @param cbMin The minimum number of bytes relative to offOpcode
1169 * that must be read.
1170 */
1171VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1172{
1173 /*
1174 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1175 *
1176 * First translate CS:rIP to a physical address.
1177 */
1178 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1179 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1180 uint8_t const cbLeft = cbOpcode - offOpcode;
1181 Assert(cbLeft < cbMin);
1182 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1183
1184 uint32_t cbToTryRead;
1185 RTGCPTR GCPtrNext;
1186 if (IEM_IS_64BIT_CODE(pVCpu))
1187 {
1188 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1189 if (!IEM_IS_CANONICAL(GCPtrNext))
1190 return iemRaiseGeneralProtectionFault0(pVCpu);
1191 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1192 }
1193 else
1194 {
1195 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1196 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1197 GCPtrNext32 += cbOpcode;
1198 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1199 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1200 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1201 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1202 if (!cbToTryRead) /* overflowed */
1203 {
1204 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1205 cbToTryRead = UINT32_MAX;
1206 /** @todo check out wrapping around the code segment. */
1207 }
1208 if (cbToTryRead < cbMin - cbLeft)
1209 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1210 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1211
1212 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1213 if (cbToTryRead > cbLeftOnPage)
1214 cbToTryRead = cbLeftOnPage;
1215 }
1216
1217 /* Restrict to opcode buffer space.
1218
1219 We're making ASSUMPTIONS here based on work done previously in
1220 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1221 be fetched in case of an instruction crossing two pages. */
1222 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1223 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1224 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1225 { /* likely */ }
1226 else
1227 {
1228 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1229 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1230 return iemRaiseGeneralProtectionFault0(pVCpu);
1231 }
1232
1233 PGMPTWALK Walk;
1234 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1235 if (RT_FAILURE(rc))
1236 {
1237 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1238#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1239 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1240 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1241#endif
1242 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1243 }
1244 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1245 {
1246 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1248 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1249 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1250#endif
1251 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1252 }
1253 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1254 {
1255 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1256#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1257 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1258 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1259#endif
1260 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1261 }
1262 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1263 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1264 /** @todo Check reserved bits and such stuff. PGM is better at doing
1265 * that, so do it when implementing the guest virtual address
1266 * TLB... */
1267
1268 /*
1269 * Read the bytes at this address.
1270 *
1271 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1272 * and since PATM should only patch the start of an instruction there
1273 * should be no need to check again here.
1274 */
1275 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1276 {
1277 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1278 cbToTryRead, PGMACCESSORIGIN_IEM);
1279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1280 { /* likely */ }
1281 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1282 {
1283 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1284 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1285 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1286 }
1287 else
1288 {
1289 Log((RT_SUCCESS(rcStrict)
1290 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1291 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1292 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1293 return rcStrict;
1294 }
1295 }
1296 else
1297 {
1298 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1299 if (RT_SUCCESS(rc))
1300 { /* likely */ }
1301 else
1302 {
1303 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1304 return rc;
1305 }
1306 }
1307 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1308 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1309
1310 return VINF_SUCCESS;
1311}
1312
1313#endif /* !IEM_WITH_CODE_TLB */
1314#ifndef IEM_WITH_SETJMP
1315
1316/**
1317 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1318 *
1319 * @returns Strict VBox status code.
1320 * @param pVCpu The cross context virtual CPU structure of the
1321 * calling thread.
1322 * @param pb Where to return the opcode byte.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1325{
1326 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1327 if (rcStrict == VINF_SUCCESS)
1328 {
1329 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1330 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1331 pVCpu->iem.s.offOpcode = offOpcode + 1;
1332 }
1333 else
1334 *pb = 0;
1335 return rcStrict;
1336}
1337
1338#else /* IEM_WITH_SETJMP */
1339
1340/**
1341 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1342 *
1343 * @returns The opcode byte.
1344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1345 */
1346uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1347{
1348# ifdef IEM_WITH_CODE_TLB
1349 uint8_t u8;
1350 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1351 return u8;
1352# else
1353 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1354 if (rcStrict == VINF_SUCCESS)
1355 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1356 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1357# endif
1358}
1359
1360#endif /* IEM_WITH_SETJMP */
1361
1362#ifndef IEM_WITH_SETJMP
1363
1364/**
1365 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1366 *
1367 * @returns Strict VBox status code.
1368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1369 * @param pu16 Where to return the opcode word.
1370 */
1371VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1372{
1373 uint8_t u8;
1374 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1375 if (rcStrict == VINF_SUCCESS)
1376 *pu16 = (int8_t)u8;
1377 return rcStrict;
1378}
1379
1380
1381/**
1382 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1383 *
1384 * @returns Strict VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1386 * @param pu32 Where to return the opcode dword.
1387 */
1388VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1389{
1390 uint8_t u8;
1391 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1392 if (rcStrict == VINF_SUCCESS)
1393 *pu32 = (int8_t)u8;
1394 return rcStrict;
1395}
1396
1397
1398/**
1399 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1403 * @param pu64 Where to return the opcode qword.
1404 */
1405VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1406{
1407 uint8_t u8;
1408 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1409 if (rcStrict == VINF_SUCCESS)
1410 *pu64 = (int8_t)u8;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
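/*
 * Sign extension example: the (int8_t) casts above do the widening, e.g. an
 * opcode byte of 0xfe (-2) becomes 0xfffe as a uint16_t, 0xfffffffe as a
 * uint32_t and 0xfffffffffffffffe as a uint64_t, which is what a signed 8-bit
 * displacement or immediate requires.
 */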
1415
1416
1417#ifndef IEM_WITH_SETJMP
1418
1419/**
1420 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1421 *
1422 * @returns Strict VBox status code.
1423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1424 * @param pu16 Where to return the opcode word.
1425 */
1426VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1427{
1428 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1429 if (rcStrict == VINF_SUCCESS)
1430 {
1431 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1432# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1433 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1434# else
1435 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1436# endif
1437 pVCpu->iem.s.offOpcode = offOpcode + 2;
1438 }
1439 else
1440 *pu16 = 0;
1441 return rcStrict;
1442}
1443
1444#else /* IEM_WITH_SETJMP */
1445
1446/**
1447 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1448 *
1449 * @returns The opcode word.
1450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1451 */
1452uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1453{
1454# ifdef IEM_WITH_CODE_TLB
1455 uint16_t u16;
1456 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1457 return u16;
1458# else
1459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1460 if (rcStrict == VINF_SUCCESS)
1461 {
1462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1463 pVCpu->iem.s.offOpcode += 2;
1464# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1465 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1466# else
1467 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1468# endif
1469 }
1470 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1471# endif
1472}
1473
1474#endif /* IEM_WITH_SETJMP */
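/*
 * Byte order example: opcode words are assembled little endian, so
 * RT_MAKE_U16(abOpcode[offOpcode], abOpcode[offOpcode + 1]) turns the byte
 * sequence 0x34 0x12 into 0x1234; the IEM_USE_UNALIGNED_DATA_ACCESS path reads
 * the same value directly, relying on a little-endian host.
 */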
1475
1476#ifndef IEM_WITH_SETJMP
1477
1478/**
1479 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1480 *
1481 * @returns Strict VBox status code.
1482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1483 * @param pu32 Where to return the opcode double word.
1484 */
1485VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1486{
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1492 pVCpu->iem.s.offOpcode = offOpcode + 2;
1493 }
1494 else
1495 *pu32 = 0;
1496 return rcStrict;
1497}
1498
1499
1500/**
1501 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1502 *
1503 * @returns Strict VBox status code.
1504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1505 * @param pu64 Where to return the opcode quad word.
1506 */
1507VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1508{
1509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1510 if (rcStrict == VINF_SUCCESS)
1511 {
1512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1513 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1514 pVCpu->iem.s.offOpcode = offOpcode + 2;
1515 }
1516 else
1517 *pu64 = 0;
1518 return rcStrict;
1519}
1520
1521#endif /* !IEM_WITH_SETJMP */
1522
1523#ifndef IEM_WITH_SETJMP
1524
1525/**
1526 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1527 *
1528 * @returns Strict VBox status code.
1529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1530 * @param pu32 Where to return the opcode dword.
1531 */
1532VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1533{
1534 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1535 if (rcStrict == VINF_SUCCESS)
1536 {
1537 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 pVCpu->iem.s.offOpcode = offOpcode + 4;
1547 }
1548 else
1549 *pu32 = 0;
1550 return rcStrict;
1551}
1552
1553#else /* IEM_WITH_SETJMP */
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1557 *
1558 * @returns The opcode dword.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 */
1561uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1562{
1563# ifdef IEM_WITH_CODE_TLB
1564 uint32_t u32;
1565 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1566 return u32;
1567# else
1568 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1569 if (rcStrict == VINF_SUCCESS)
1570 {
1571 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1574 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1575# else
1576 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1577 pVCpu->iem.s.abOpcode[offOpcode + 1],
1578 pVCpu->iem.s.abOpcode[offOpcode + 2],
1579 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1580# endif
1581 }
1582 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1583# endif
1584}
1585
1586#endif /* IEM_WITH_SETJMP */
1587
1588#ifndef IEM_WITH_SETJMP
1589
1590/**
1591 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1592 *
1593 * @returns Strict VBox status code.
1594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1595 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1596 */
1597VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1598{
1599 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1600 if (rcStrict == VINF_SUCCESS)
1601 {
1602 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1603 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1604 pVCpu->iem.s.abOpcode[offOpcode + 1],
1605 pVCpu->iem.s.abOpcode[offOpcode + 2],
1606 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1607 pVCpu->iem.s.offOpcode = offOpcode + 4;
1608 }
1609 else
1610 *pu64 = 0;
1611 return rcStrict;
1612}
1613
1614
1615/**
1616 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1617 *
1618 * @returns Strict VBox status code.
1619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1620 * @param pu64 Where to return the opcode dword, sign extended to a qword.
1621 */
1622VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1623{
1624 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1625 if (rcStrict == VINF_SUCCESS)
1626 {
1627 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1628 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1629 pVCpu->iem.s.abOpcode[offOpcode + 1],
1630 pVCpu->iem.s.abOpcode[offOpcode + 2],
1631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1632 pVCpu->iem.s.offOpcode = offOpcode + 4;
1633 }
1634 else
1635 *pu64 = 0;
1636 return rcStrict;
1637}
1638
1639#endif /* !IEM_WITH_SETJMP */
1640
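/*
 * A hypothetical helper illustrating the sign extension performed by
 * iemOpcodeGetNextS32SxU64Slow above: casting the assembled dword to int32_t
 * first makes the conversion to a 64-bit value replicate bit 31 into the
 * upper half (only <stdint.h> assumed).
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
# include <stdint.h>

static uint64_t exampleSignExtendS32ToU64(uint32_t u32)
{
    /* E.g. 0xfffffff0 (-16) becomes 0xfffffffffffffff0, while 0x00000010 stays 0x10. */
    return (uint64_t)(int64_t)(int32_t)u32;
}
#endif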
1641#ifndef IEM_WITH_SETJMP
1642
1643/**
1644 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1645 *
1646 * @returns Strict VBox status code.
1647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1648 * @param pu64 Where to return the opcode qword.
1649 */
1650VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1651{
1652 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1653 if (rcStrict == VINF_SUCCESS)
1654 {
1655 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1657 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1658# else
1659 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1660 pVCpu->iem.s.abOpcode[offOpcode + 1],
1661 pVCpu->iem.s.abOpcode[offOpcode + 2],
1662 pVCpu->iem.s.abOpcode[offOpcode + 3],
1663 pVCpu->iem.s.abOpcode[offOpcode + 4],
1664 pVCpu->iem.s.abOpcode[offOpcode + 5],
1665 pVCpu->iem.s.abOpcode[offOpcode + 6],
1666 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1667# endif
1668 pVCpu->iem.s.offOpcode = offOpcode + 8;
1669 }
1670 else
1671 *pu64 = 0;
1672 return rcStrict;
1673}
1674
1675#else /* IEM_WITH_SETJMP */
1676
1677/**
1678 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1679 *
1680 * @returns The opcode qword.
1681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1682 */
1683uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1684{
1685# ifdef IEM_WITH_CODE_TLB
1686 uint64_t u64;
1687 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1688 return u64;
1689# else
1690 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1691 if (rcStrict == VINF_SUCCESS)
1692 {
1693 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1694 pVCpu->iem.s.offOpcode = offOpcode + 8;
1695# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1696 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1697# else
1698 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1699 pVCpu->iem.s.abOpcode[offOpcode + 1],
1700 pVCpu->iem.s.abOpcode[offOpcode + 2],
1701 pVCpu->iem.s.abOpcode[offOpcode + 3],
1702 pVCpu->iem.s.abOpcode[offOpcode + 4],
1703 pVCpu->iem.s.abOpcode[offOpcode + 5],
1704 pVCpu->iem.s.abOpcode[offOpcode + 6],
1705 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1706# endif
1707 }
1708 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1709# endif
1710}
1711
1712#endif /* IEM_WITH_SETJMP */
1713
1714
1715
1716/** @name Misc Worker Functions.
1717 * @{
1718 */
1719
1720/**
1721 * Gets the exception class for the specified exception vector.
1722 *
1723 * @returns The class of the specified exception.
1724 * @param uVector The exception vector.
1725 */
1726static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1727{
1728 Assert(uVector <= X86_XCPT_LAST);
1729 switch (uVector)
1730 {
1731 case X86_XCPT_DE:
1732 case X86_XCPT_TS:
1733 case X86_XCPT_NP:
1734 case X86_XCPT_SS:
1735 case X86_XCPT_GP:
1736 case X86_XCPT_SX: /* AMD only */
1737 return IEMXCPTCLASS_CONTRIBUTORY;
1738
1739 case X86_XCPT_PF:
1740 case X86_XCPT_VE: /* Intel only */
1741 return IEMXCPTCLASS_PAGE_FAULT;
1742
1743 case X86_XCPT_DF:
1744 return IEMXCPTCLASS_DOUBLE_FAULT;
1745 }
1746 return IEMXCPTCLASS_BENIGN;
1747}
1748
1749
1750/**
1751 * Evaluates how to handle an exception caused during delivery of another event
1752 * (exception / interrupt).
1753 *
1754 * @returns How to handle the recursive exception.
1755 * @param pVCpu The cross context virtual CPU structure of the
1756 * calling thread.
1757 * @param fPrevFlags The flags of the previous event.
1758 * @param uPrevVector The vector of the previous event.
1759 * @param fCurFlags The flags of the current exception.
1760 * @param uCurVector The vector of the current exception.
1761 * @param pfXcptRaiseInfo Where to store additional information about the
1762 * exception condition. Optional.
1763 */
1764VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1765 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1766{
1767 /*
1768 * Only CPU exceptions can be raised while delivering other events; software interrupt
1769 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1770 */
1771 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1772 Assert(pVCpu); RT_NOREF(pVCpu);
1773 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1774
1775 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1776 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1777 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1778 {
1779 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1780 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1781 {
1782 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1783 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1784 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1785 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1786 {
1787 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1788 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1789 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1790 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1791 uCurVector, pVCpu->cpum.GstCtx.cr2));
1792 }
1793 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1794 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1795 {
1796 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1797 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1798 }
1799 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1800 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1801 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1802 {
1803 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1804 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1805 }
1806 }
1807 else
1808 {
1809 if (uPrevVector == X86_XCPT_NMI)
1810 {
1811 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1812 if (uCurVector == X86_XCPT_PF)
1813 {
1814 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1815 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1816 }
1817 }
1818 else if ( uPrevVector == X86_XCPT_AC
1819 && uCurVector == X86_XCPT_AC)
1820 {
1821 enmRaise = IEMXCPTRAISE_CPU_HANG;
1822 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1823 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1824 }
1825 }
1826 }
1827 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1828 {
1829 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1830 if (uCurVector == X86_XCPT_PF)
1831 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1832 }
1833 else
1834 {
1835 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1836 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1837 }
1838
1839 if (pfXcptRaiseInfo)
1840 *pfXcptRaiseInfo = fRaiseInfo;
1841 return enmRaise;
1842}
1843
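/*
 * A condensed restatement of the class matrix evaluated by
 * IEMEvaluateRecursiveXcpt above, as a hypothetical helper (the IEMXCPTCLASS
 * and IEMXCPTRAISE values are the ones used in that function); the real code
 * additionally tracks the NMI/#AC specifics and the extra raise-info flags.
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static IEMXCPTRAISE exampleXcptMatrix(IEMXCPTCLASS enmPrev, IEMXCPTCLASS enmCur)
{
    if (   enmPrev == IEMXCPTCLASS_PAGE_FAULT
        && (enmCur == IEMXCPTCLASS_PAGE_FAULT || enmCur == IEMXCPTCLASS_CONTRIBUTORY))
        return IEMXCPTRAISE_DOUBLE_FAULT;       /* #PF or contributory fault during #PF delivery. */
    if (   enmPrev == IEMXCPTCLASS_CONTRIBUTORY
        && enmCur  == IEMXCPTCLASS_CONTRIBUTORY)
        return IEMXCPTRAISE_DOUBLE_FAULT;       /* contributory fault during contributory delivery. */
    if (   enmPrev == IEMXCPTCLASS_DOUBLE_FAULT
        && (enmCur == IEMXCPTCLASS_CONTRIBUTORY || enmCur == IEMXCPTCLASS_PAGE_FAULT))
        return IEMXCPTRAISE_TRIPLE_FAULT;       /* faulting inside #DF delivery. */
    return IEMXCPTRAISE_CURRENT_XCPT;           /* benign combinations just raise the new one. */
}
#endif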
1844
1845/**
1846 * Enters the CPU shutdown state initiated by a triple fault or other
1847 * unrecoverable conditions.
1848 *
1849 * @returns Strict VBox status code.
1850 * @param pVCpu The cross context virtual CPU structure of the
1851 * calling thread.
1852 */
1853static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1854{
1855 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1856 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1857
1858 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1859 {
1860 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1861 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1862 }
1863
1864 RT_NOREF(pVCpu);
1865 return VINF_EM_TRIPLE_FAULT;
1866}
1867
1868
1869/**
1870 * Validates a new SS segment.
1871 *
1872 * @returns VBox strict status code.
1873 * @param pVCpu The cross context virtual CPU structure of the
1874 * calling thread.
1875 * @param NewSS The new SS selector.
1876 * @param uCpl The CPL to load the stack for.
1877 * @param pDesc Where to return the descriptor.
1878 */
1879static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1880{
1881 /* Null selectors are not allowed (we're not called for dispatching
1882 interrupts with SS=0 in long mode). */
1883 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1884 {
1885 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1886 return iemRaiseTaskSwitchFault0(pVCpu);
1887 }
1888
1889 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1890 if ((NewSS & X86_SEL_RPL) != uCpl)
1891 {
1892 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1893 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1894 }
1895
1896 /*
1897 * Read the descriptor.
1898 */
1899 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1900 if (rcStrict != VINF_SUCCESS)
1901 return rcStrict;
1902
1903 /*
1904 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1905 */
1906 if (!pDesc->Legacy.Gen.u1DescType)
1907 {
1908 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1909 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1910 }
1911
1912 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1913 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1916 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1917 }
1918 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1919 {
1920 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1921 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1922 }
1923
1924 /* Is it there? */
1925 /** @todo testcase: Is this checked before the canonical / limit check below? */
1926 if (!pDesc->Legacy.Gen.u1Present)
1927 {
1928 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1929 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1930 }
1931
1932 return VINF_SUCCESS;
1933}
1934
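/*
 * Hypothetical boolean condensation of the SS checks performed by
 * iemMiscValidateNewSS above; the real code raises #TS/#NP with the proper
 * error codes instead of returning a plain yes/no.
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static bool exampleIsNewSsAcceptable(RTSEL Sel, IEMSELDESC const *pDesc, uint8_t uCpl)
{
    return (Sel & X86_SEL_MASK_OFF_RPL) != 0                    /* not a null selector */
        && (Sel & X86_SEL_RPL) == uCpl                          /* RPL == CPL */
        && pDesc->Legacy.Gen.u1DescType                         /* code/data, not system */
        && !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)      /* not a code segment */
        && (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE)      /* writable */
        && pDesc->Legacy.Gen.u2Dpl == uCpl                      /* DPL == CPL */
        && pDesc->Legacy.Gen.u1Present;                         /* present */
}
#endif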
1935/** @} */
1936
1937
1938/** @name Raising Exceptions.
1939 *
1940 * @{
1941 */
1942
1943
1944/**
1945 * Loads the specified stack far pointer from the TSS.
1946 *
1947 * @returns VBox strict status code.
1948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1949 * @param uCpl The CPL to load the stack for.
1950 * @param pSelSS Where to return the new stack segment.
1951 * @param puEsp Where to return the new stack pointer.
1952 */
1953static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1954{
1955 VBOXSTRICTRC rcStrict;
1956 Assert(uCpl < 4);
1957
1958 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1959 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1960 {
1961 /*
1962 * 16-bit TSS (X86TSS16).
1963 */
1964 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1965 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1966 {
1967 uint32_t off = uCpl * 4 + 2;
1968 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1969 {
1970 /** @todo check actual access pattern here. */
1971 uint32_t u32Tmp = 0; /* gcc maybe... */
1972 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1973 if (rcStrict == VINF_SUCCESS)
1974 {
1975 *puEsp = RT_LOWORD(u32Tmp);
1976 *pSelSS = RT_HIWORD(u32Tmp);
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 else
1981 {
1982 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1983 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1984 }
1985 break;
1986 }
1987
1988 /*
1989 * 32-bit TSS (X86TSS32).
1990 */
1991 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1992 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1993 {
1994 uint32_t off = uCpl * 8 + 4;
1995 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1996 {
1997/** @todo check actual access pattern here. */
1998 uint64_t u64Tmp;
1999 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2000 if (rcStrict == VINF_SUCCESS)
2001 {
2002 *puEsp = u64Tmp & UINT32_MAX;
2003 *pSelSS = (RTSEL)(u64Tmp >> 32);
2004 return VINF_SUCCESS;
2005 }
2006 }
2007 else
2008 {
2009 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2010 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2011 }
2012 break;
2013 }
2014
2015 default:
2016 AssertFailed();
2017 rcStrict = VERR_IEM_IPE_4;
2018 break;
2019 }
2020
2021 *puEsp = 0; /* make gcc happy */
2022 *pSelSS = 0; /* make gcc happy */
2023 return rcStrict;
2024}
2025
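/*
 * Worked example of the ring-stack slot arithmetic above (hypothetical helper):
 * a 16-bit TSS stores sp:ss pairs of 2+2 bytes starting at offset 2, a 32-bit
 * TSS stores esp:ss pairs of 4+4 bytes starting at offset 4.  For uCpl=1 this
 * gives offset 6 (sp1 at 6, ss1 at 8) respectively offset 12 (esp1 at 12,
 * ss1 at 16).
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static uint32_t exampleTssStackSlotOffset(bool fTss386, uint8_t uCpl)
{
    return fTss386 ? (uint32_t)uCpl * 8 + 4 : (uint32_t)uCpl * 4 + 2;
}
#endif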
2026
2027/**
2028 * Loads the specified stack pointer from the 64-bit TSS.
2029 *
2030 * @returns VBox strict status code.
2031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2032 * @param uCpl The CPL to load the stack for.
2033 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2034 * @param puRsp Where to return the new stack pointer.
2035 */
2036static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2037{
2038 Assert(uCpl < 4);
2039 Assert(uIst < 8);
2040 *puRsp = 0; /* make gcc happy */
2041
2042 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2043 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2044
2045 uint32_t off;
2046 if (uIst)
2047 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2048 else
2049 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2050 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2051 {
2052 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2053 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2054 }
2055
2056 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2057}
2058
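/*
 * Worked example of the 64-bit TSS offset selection above (architectural
 * layout: RSP0 at offset 0x04, IST1 at 0x24): uIst=3 selects
 * (3 - 1) * 8 + 0x24 = 0x34 (IST3), while uIst=0 with uCpl=2 selects
 * 2 * 8 + 0x04 = 0x14 (RSP2).
 */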
2059
2060/**
2061 * Adjust the CPU state according to the exception being raised.
2062 *
2063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2064 * @param u8Vector The exception that has been raised.
2065 */
2066DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2067{
2068 switch (u8Vector)
2069 {
2070 case X86_XCPT_DB:
2071 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2072 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2073 break;
2074 /** @todo Read the AMD and Intel exception reference... */
2075 }
2076}
2077
2078
2079/**
2080 * Implements exceptions and interrupts for real mode.
2081 *
2082 * @returns VBox strict status code.
2083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2084 * @param cbInstr The number of bytes to offset rIP by in the return
2085 * address.
2086 * @param u8Vector The interrupt / exception vector number.
2087 * @param fFlags The flags.
2088 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2089 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2090 */
2091static VBOXSTRICTRC
2092iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2093 uint8_t cbInstr,
2094 uint8_t u8Vector,
2095 uint32_t fFlags,
2096 uint16_t uErr,
2097 uint64_t uCr2) RT_NOEXCEPT
2098{
2099 NOREF(uErr); NOREF(uCr2);
2100 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2101
2102 /*
2103 * Read the IDT entry.
2104 */
2105 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2106 {
2107 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2108 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2109 }
2110 RTFAR16 Idte;
2111 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2112 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2113 {
2114 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2115 return rcStrict;
2116 }
2117
2118#ifdef LOG_ENABLED
2119 /* If software interrupt, try decode it if logging is enabled and such. */
2120 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2121 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2122 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2123#endif
2124
2125 /*
2126 * Push the stack frame.
2127 */
2128 uint8_t bUnmapInfo;
2129 uint16_t *pu16Frame;
2130 uint64_t uNewRsp;
2131 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2132 if (rcStrict != VINF_SUCCESS)
2133 return rcStrict;
2134
2135 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2136#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2137 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2138 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2139 fEfl |= UINT16_C(0xf000);
2140#endif
2141 pu16Frame[2] = (uint16_t)fEfl;
2142 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2143 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2144 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2145 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2146 return rcStrict;
2147
2148 /*
2149 * Load the vector address into cs:ip and make exception specific state
2150 * adjustments.
2151 */
2152 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2153 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2154 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2155 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2156 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2157 pVCpu->cpum.GstCtx.rip = Idte.off;
2158 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2159 IEMMISC_SET_EFL(pVCpu, fEfl);
2160
2161 /** @todo do we actually do this in real mode? */
2162 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2163 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2164
2165 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2166 so best leave them alone in case we're in a weird kind of real mode... */
2167
2168 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2169}
2170
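/*
 * Condensed model of the real-mode dispatch above as hypothetical helpers (no
 * memory accesses or fault checks): the IVT entry for a vector sits at
 * vector * 4 and holds a 16:16 far pointer (IP then CS); the handler then runs
 * at linear address CS * 16 + IP after FLAGS, CS and the return IP have been
 * pushed (6 bytes) and IF/TF/AC have been cleared.
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
# include <stdint.h>

static uint32_t exampleIvtEntryAddress(uint8_t bVector)
{
    return (uint32_t)bVector * 4;               /* e.g. INT 21h -> IVT entry at 0x84 */
}

static uint32_t exampleHandlerLinearAddress(uint16_t uSel, uint16_t uOff)
{
    return ((uint32_t)uSel << 4) + uOff;        /* real-mode segment:offset -> linear */
}
#endif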
2171
2172/**
2173 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2174 *
2175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2176 * @param pSReg Pointer to the segment register.
2177 */
2178DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2179{
2180 pSReg->Sel = 0;
2181 pSReg->ValidSel = 0;
2182 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2183 {
2184 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes. */
2185 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2186 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2187 }
2188 else
2189 {
2190 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2191 /** @todo check this on AMD-V */
2192 pSReg->u64Base = 0;
2193 pSReg->u32Limit = 0;
2194 }
2195}
2196
2197
2198/**
2199 * Loads a segment selector during a task switch in V8086 mode.
2200 *
2201 * @param pSReg Pointer to the segment register.
2202 * @param uSel The selector value to load.
2203 */
2204DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2205{
2206 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2207 pSReg->Sel = uSel;
2208 pSReg->ValidSel = uSel;
2209 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2210 pSReg->u64Base = uSel << 4;
2211 pSReg->u32Limit = 0xffff;
2212 pSReg->Attr.u = 0xf3;
2213}
2214
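/*
 * Worked example for the V8086 load above: uSel=0x1234 yields a hidden base of
 * 0x1234 << 4 = 0x12340, a 64 KiB limit (0xffff) and attribute byte 0xf3
 * (present, DPL=3, accessed read/write data), which is how the CPU derives
 * segment state from a selector in virtual-8086 mode.
 */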
2215
2216/**
2217 * Loads a segment selector during a task switch in protected mode.
2218 *
2219 * In this task switch scenario, we would throw \#TS exceptions rather than
2220 * \#GPs.
2221 *
2222 * @returns VBox strict status code.
2223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2224 * @param pSReg Pointer to the segment register.
2225 * @param uSel The new selector value.
2226 *
2227 * @remarks This does _not_ handle CS or SS.
2228 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2229 */
2230static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2231{
2232 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2233
2234 /* Null data selector. */
2235 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2236 {
2237 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2239 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2240 return VINF_SUCCESS;
2241 }
2242
2243 /* Fetch the descriptor. */
2244 IEMSELDESC Desc;
2245 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2246 if (rcStrict != VINF_SUCCESS)
2247 {
2248 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2249 VBOXSTRICTRC_VAL(rcStrict)));
2250 return rcStrict;
2251 }
2252
2253 /* Must be a data segment or readable code segment. */
2254 if ( !Desc.Legacy.Gen.u1DescType
2255 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2256 {
2257 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2258 Desc.Legacy.Gen.u4Type));
2259 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2260 }
2261
2262 /* Check privileges for data segments and non-conforming code segments. */
2263 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2264 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2265 {
2266 /* The RPL and the new CPL must be less than or equal to the DPL. */
2267 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2268 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2269 {
2270 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2271 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2272 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2273 }
2274 }
2275
2276 /* Is it there? */
2277 if (!Desc.Legacy.Gen.u1Present)
2278 {
2279 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2280 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2281 }
2282
2283 /* The base and limit. */
2284 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2285 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2286
2287 /*
2288 * Ok, everything checked out fine. Now set the accessed bit before
2289 * committing the result into the registers.
2290 */
2291 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2292 {
2293 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2294 if (rcStrict != VINF_SUCCESS)
2295 return rcStrict;
2296 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2297 }
2298
2299 /* Commit */
2300 pSReg->Sel = uSel;
2301 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2302 pSReg->u32Limit = cbLimit;
2303 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2304 pSReg->ValidSel = uSel;
2305 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2306 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2307 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2308
2309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2310 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2311 return VINF_SUCCESS;
2312}
2313
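/*
 * Hypothetical condensation of the privilege rule checked above for data
 * segments and non-conforming readable code segments: both the selector RPL
 * and the current CPL must be numerically less than or equal to the descriptor
 * DPL, i.e. max(RPL, CPL) <= DPL; conforming code segments skip this check.
 */
#if 0 /* Illustrative sketch only -- not part of the build. */
static bool exampleDataSegPrivCheckOk(uint8_t uRpl, uint8_t uCpl, uint8_t uDpl)
{
    return uRpl <= uDpl && uCpl <= uDpl;
}
#endif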
2314
2315/**
2316 * Performs a task switch.
2317 *
2318 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2319 * caller is responsible for performing the necessary checks (like DPL, TSS
2320 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2321 * reference for JMP, CALL, IRET.
2322 *
2323 * If the task switch is due to a software interrupt or hardware exception,
2324 * the caller is responsible for validating the TSS selector and descriptor. See
2325 * Intel Instruction reference for INT n.
2326 *
2327 * @returns VBox strict status code.
2328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2329 * @param enmTaskSwitch The cause of the task switch.
2330 * @param uNextEip The EIP effective after the task switch.
2331 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2332 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2333 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2334 * @param SelTss The TSS selector of the new task.
2335 * @param pNewDescTss Pointer to the new TSS descriptor.
2336 */
2337VBOXSTRICTRC
2338iemTaskSwitch(PVMCPUCC pVCpu,
2339 IEMTASKSWITCH enmTaskSwitch,
2340 uint32_t uNextEip,
2341 uint32_t fFlags,
2342 uint16_t uErr,
2343 uint64_t uCr2,
2344 RTSEL SelTss,
2345 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2346{
2347 Assert(!IEM_IS_REAL_MODE(pVCpu));
2348 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2349 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2350
2351 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2352 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2353 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2354 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2355 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2356
2357 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2358 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2359
2360 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2361 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2362
2363 /* Update CR2 in case it's a page-fault. */
2364 /** @todo This should probably be done much earlier in IEM/PGM. See
2365 * @bugref{5653#c49}. */
2366 if (fFlags & IEM_XCPT_FLAGS_CR2)
2367 pVCpu->cpum.GstCtx.cr2 = uCr2;
2368
2369 /*
2370 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2371 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2372 */
2373 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2374 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2375 if (uNewTssLimit < uNewTssLimitMin)
2376 {
2377 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2378 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2379 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2380 }
2381
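    /* For reference: the architectural minimum TSS limits checked above are 0x2b
       for a 16-bit TSS (44 byte structure) and 0x67 for a 32-bit TSS (104 bytes),
       cf. the AssertCompile on sizeof(X86TSS32) further down. */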
2382 /*
2383 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2384 * The new TSS must have been read and validated (DPL, limits etc.) before a
2385 * task-switch VM-exit commences.
2386 *
2387 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2388 */
2389 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2390 {
2391 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2392 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2393 }
2394
2395 /*
2396 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2397 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2398 */
2399 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2400 {
2401 uint64_t const uExitInfo1 = SelTss;
2402 uint64_t uExitInfo2 = uErr;
2403 switch (enmTaskSwitch)
2404 {
2405 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2406 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2407 default: break;
2408 }
2409 if (fFlags & IEM_XCPT_FLAGS_ERR)
2410 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2411 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2412 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2413
2414 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2415 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2416 RT_NOREF2(uExitInfo1, uExitInfo2);
2417 }
2418
2419 /*
2420 * Check the current TSS limit. The last written byte to the current TSS during the
2421 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2422 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2423 *
2424 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2425 * end up with smaller than "legal" TSS limits.
2426 */
2427 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2428 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2429 if (uCurTssLimit < uCurTssLimitMin)
2430 {
2431 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2432 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2433 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2434 }
2435
2436 /*
2437 * Verify that the new TSS can be accessed and map it. Map only the required contents
2438 * and not the entire TSS.
2439 */
2440 uint8_t bUnmapInfoNewTss;
2441 void *pvNewTss;
2442 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2443 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2444 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2445 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2446 * not perform correct translation if this happens. See Intel spec. 7.2.1
2447 * "Task-State Segment". */
2448 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2449/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2450 * Consider wrapping the remainder into a function for simpler cleanup. */
2451 if (rcStrict != VINF_SUCCESS)
2452 {
2453 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2454 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2455 return rcStrict;
2456 }
2457
2458 /*
2459 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2460 */
2461 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2462 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2463 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2464 {
2465 uint8_t bUnmapInfoDescCurTss;
2466 PX86DESC pDescCurTss;
2467 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2468 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2469 if (rcStrict != VINF_SUCCESS)
2470 {
2471 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2472 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2473 return rcStrict;
2474 }
2475
2476 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2477 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2478 if (rcStrict != VINF_SUCCESS)
2479 {
2480 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2481 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2482 return rcStrict;
2483 }
2484
2485 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2486 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2487 {
2488 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2489 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2490 fEFlags &= ~X86_EFL_NT;
2491 }
2492 }
2493
2494 /*
2495 * Save the CPU state into the current TSS.
2496 */
2497 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2498 if (GCPtrNewTss == GCPtrCurTss)
2499 {
2500 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2501 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2502 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2503 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2504 pVCpu->cpum.GstCtx.ldtr.Sel));
2505 }
2506 if (fIsNewTss386)
2507 {
2508 /*
2509 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2510 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2511 */
2512 uint8_t bUnmapInfoCurTss32;
2513 void *pvCurTss32;
2514 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2515 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2516 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
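    /* Worked example (architectural X86TSS32 layout): eip is at offset 0x20 and
       selLdt at 0x60, so only the 0x40 (64) byte window of dynamic fields
       [eip..gs] is mapped and written back below. */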
2517 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2518 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2519 if (rcStrict != VINF_SUCCESS)
2520 {
2521 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2522 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2523 return rcStrict;
2524 }
2525
2526 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2527 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2528 pCurTss32->eip = uNextEip;
2529 pCurTss32->eflags = fEFlags;
2530 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2531 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2532 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2533 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2534 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2535 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2536 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2537 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2538 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2539 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2540 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2541 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2542 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2543 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2544
2545 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2546 if (rcStrict != VINF_SUCCESS)
2547 {
2548 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2549 VBOXSTRICTRC_VAL(rcStrict)));
2550 return rcStrict;
2551 }
2552 }
2553 else
2554 {
2555 /*
2556 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2557 */
2558 uint8_t bUnmapInfoCurTss16;
2559 void *pvCurTss16;
2560 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2561 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2562 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2563 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2564 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2565 if (rcStrict != VINF_SUCCESS)
2566 {
2567 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2568 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2569 return rcStrict;
2570 }
2571
2572 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2573 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2574 pCurTss16->ip = uNextEip;
2575 pCurTss16->flags = (uint16_t)fEFlags;
2576 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2577 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2578 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2579 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2580 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2581 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2582 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2583 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2584 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2585 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2586 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2587 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2588
2589 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2590 if (rcStrict != VINF_SUCCESS)
2591 {
2592 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2593 VBOXSTRICTRC_VAL(rcStrict)));
2594 return rcStrict;
2595 }
2596 }
2597
2598 /*
2599 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2600 */
2601 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2602 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2603 {
2604 /* 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2605 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2606 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2607 }
2608
2609 /*
2610 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2611 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2612 */
2613 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2614 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2615 bool fNewDebugTrap;
2616 if (fIsNewTss386)
2617 {
2618 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2619 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2620 uNewEip = pNewTss32->eip;
2621 uNewEflags = pNewTss32->eflags;
2622 uNewEax = pNewTss32->eax;
2623 uNewEcx = pNewTss32->ecx;
2624 uNewEdx = pNewTss32->edx;
2625 uNewEbx = pNewTss32->ebx;
2626 uNewEsp = pNewTss32->esp;
2627 uNewEbp = pNewTss32->ebp;
2628 uNewEsi = pNewTss32->esi;
2629 uNewEdi = pNewTss32->edi;
2630 uNewES = pNewTss32->es;
2631 uNewCS = pNewTss32->cs;
2632 uNewSS = pNewTss32->ss;
2633 uNewDS = pNewTss32->ds;
2634 uNewFS = pNewTss32->fs;
2635 uNewGS = pNewTss32->gs;
2636 uNewLdt = pNewTss32->selLdt;
2637 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2638 }
2639 else
2640 {
2641 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2642 uNewCr3 = 0;
2643 uNewEip = pNewTss16->ip;
2644 uNewEflags = pNewTss16->flags;
2645 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2646 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2647 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2648 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2649 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2650 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2651 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2652 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2653 uNewES = pNewTss16->es;
2654 uNewCS = pNewTss16->cs;
2655 uNewSS = pNewTss16->ss;
2656 uNewDS = pNewTss16->ds;
2657 uNewFS = 0;
2658 uNewGS = 0;
2659 uNewLdt = pNewTss16->selLdt;
2660 fNewDebugTrap = false;
2661 }
2662
2663 if (GCPtrNewTss == GCPtrCurTss)
2664 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2665 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2666
2667 /*
2668 * We're done accessing the new TSS.
2669 */
2670 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2671 if (rcStrict != VINF_SUCCESS)
2672 {
2673 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2674 return rcStrict;
2675 }
2676
2677 /*
2678 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2679 */
2680 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2681 {
2682 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2683 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2684 if (rcStrict != VINF_SUCCESS)
2685 {
2686 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2687 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2688 return rcStrict;
2689 }
2690
2691 /* Check that the descriptor indicates the new TSS is available (not busy). */
2692 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2693 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2694 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2695
2696 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2697 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2698 if (rcStrict != VINF_SUCCESS)
2699 {
2700 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2701 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2702 return rcStrict;
2703 }
2704 }
2705
2706 /*
2707 * From this point on, we're technically in the new task. We will defer exceptions
2708 * until the completion of the task switch but before executing any instructions in the new task.
2709 */
2710 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2711 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2712 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2713 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2714 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2715 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2716 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2717
2718 /* Set the busy bit in TR. */
2719 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2720
2721 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2722 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2723 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2724 {
2725 uNewEflags |= X86_EFL_NT;
2726 }
2727
2728 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2729 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2730 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2731
2732 pVCpu->cpum.GstCtx.eip = uNewEip;
2733 pVCpu->cpum.GstCtx.eax = uNewEax;
2734 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2735 pVCpu->cpum.GstCtx.edx = uNewEdx;
2736 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2737 pVCpu->cpum.GstCtx.esp = uNewEsp;
2738 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2739 pVCpu->cpum.GstCtx.esi = uNewEsi;
2740 pVCpu->cpum.GstCtx.edi = uNewEdi;
2741
2742 uNewEflags &= X86_EFL_LIVE_MASK;
2743 uNewEflags |= X86_EFL_RA1_MASK;
2744 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2745
2746 /*
2747 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2748 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2749 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2750 */
2751 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2752 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2753
2754 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2755 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2756
2757 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2758 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2759
2760 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2761 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2762
2763 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2764 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2765
2766 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2767 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2768 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2769
2770 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2771 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2772 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2773 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2774
2775 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2776 {
2777 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2778 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2779 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2780 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2781 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2782 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2783 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2784 }
2785
2786 /*
2787 * Switch CR3 for the new task.
2788 */
2789 if ( fIsNewTss386
2790 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2791 {
2792 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2793 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2794 AssertRCSuccessReturn(rc, rc);
2795
2796 /* Inform PGM. */
2797 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2798 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2799 AssertRCReturn(rc, rc);
2800 /* ignore informational status codes */
2801
2802 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2803 }
2804
2805 /*
2806 * Switch LDTR for the new task.
2807 */
2808 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2809 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2810 else
2811 {
2812 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2813
2814 IEMSELDESC DescNewLdt;
2815 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2816 if (rcStrict != VINF_SUCCESS)
2817 {
2818 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2819 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2820 return rcStrict;
2821 }
2822 if ( !DescNewLdt.Legacy.Gen.u1Present
2823 || DescNewLdt.Legacy.Gen.u1DescType
2824 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2825 {
2826 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2827 uNewLdt, DescNewLdt.Legacy.u));
2828 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2829 }
2830
2831 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2832 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2833 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2834 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2835 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2836 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2837 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2839 }
2840
2841 IEMSELDESC DescSS;
2842 if (IEM_IS_V86_MODE(pVCpu))
2843 {
2844 IEM_SET_CPL(pVCpu, 3);
2845 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2846 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2847 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2848 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2849 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2850 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2851
2852 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2853 DescSS.Legacy.u = 0;
2854 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2855 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2856 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2857 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2858 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2859 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2860 DescSS.Legacy.Gen.u2Dpl = 3;
2861 }
2862 else
2863 {
2864 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2865
2866 /*
2867 * Load the stack segment for the new task.
2868 */
2869 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2870 {
2871 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2872 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2873 }
2874
2875 /* Fetch the descriptor. */
2876 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2877 if (rcStrict != VINF_SUCCESS)
2878 {
2879 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2880 VBOXSTRICTRC_VAL(rcStrict)));
2881 return rcStrict;
2882 }
2883
2884 /* SS must be a data segment and writable. */
2885 if ( !DescSS.Legacy.Gen.u1DescType
2886 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2887 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2888 {
2889 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2890 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2891 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2892 }
2893
2894 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2895 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2896 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2897 {
2898 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2899 uNewCpl));
2900 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2901 }
2902
2903 /* Is it there? */
2904 if (!DescSS.Legacy.Gen.u1Present)
2905 {
2906 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2907 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2908 }
2909
2910 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2911 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2912
2913 /* Set the accessed bit before committing the result into SS. */
2914 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2915 {
2916 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2917 if (rcStrict != VINF_SUCCESS)
2918 return rcStrict;
2919 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2920 }
2921
2922 /* Commit SS. */
2923 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2924 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2925 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2926 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2927 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2928 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2929 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2930
2931 /* CPL has changed, update IEM before loading rest of segments. */
2932 IEM_SET_CPL(pVCpu, uNewCpl);
2933
2934 /*
2935 * Load the data segments for the new task.
2936 */
2937 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2938 if (rcStrict != VINF_SUCCESS)
2939 return rcStrict;
2940 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2941 if (rcStrict != VINF_SUCCESS)
2942 return rcStrict;
2943 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2944 if (rcStrict != VINF_SUCCESS)
2945 return rcStrict;
2946 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2947 if (rcStrict != VINF_SUCCESS)
2948 return rcStrict;
2949
2950 /*
2951 * Load the code segment for the new task.
2952 */
2953 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2954 {
2955 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2956 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2957 }
2958
2959 /* Fetch the descriptor. */
2960 IEMSELDESC DescCS;
2961 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2962 if (rcStrict != VINF_SUCCESS)
2963 {
2964 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2965 return rcStrict;
2966 }
2967
2968 /* CS must be a code segment. */
2969 if ( !DescCS.Legacy.Gen.u1DescType
2970 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2971 {
2972 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2973 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2974 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2975 }
2976
2977 /* For conforming CS, DPL must be less than or equal to the RPL. */
2978 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2979 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2980 {
2981 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2982 DescCS.Legacy.Gen.u2Dpl));
2983 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2984 }
2985
2986 /* For non-conforming CS, DPL must match RPL. */
2987 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2988 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2989 {
2990 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2991 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2992 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2993 }
2994
2995 /* Is it there? */
2996 if (!DescCS.Legacy.Gen.u1Present)
2997 {
2998 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2999 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3000 }
3001
3002 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3003 u64Base = X86DESC_BASE(&DescCS.Legacy);
3004
3005 /* Set the accessed bit before committing the result into CS. */
3006 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3007 {
3008 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3009 if (rcStrict != VINF_SUCCESS)
3010 return rcStrict;
3011 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3012 }
3013
3014 /* Commit CS. */
3015 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3016 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3017 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3018 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3019 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3020 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3022 }
3023
3024 /* Make sure the CPU mode is correct. */
3025 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3026 if (fExecNew != pVCpu->iem.s.fExec)
3027 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3028 pVCpu->iem.s.fExec = fExecNew;
3029
3030 /** @todo Debug trap. */
3031 if (fIsNewTss386 && fNewDebugTrap)
3032 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3033
3034 /*
3035 * Construct the error code masks based on what caused this task switch.
3036 * See Intel Instruction reference for INT.
3037 */
3038 uint16_t uExt;
3039 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3040 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3041 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3042 uExt = 1;
3043 else
3044 uExt = 0;
3045
3046 /*
3047 * Push any error code on to the new stack.
3048 */
3049 if (fFlags & IEM_XCPT_FLAGS_ERR)
3050 {
3051 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3052 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3053 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
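        /* Only the error code gets pushed onto the new task's stack here: 4 bytes for a 32-bit TSS, 2 bytes for a 16-bit one. */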
3054
3055 /* Check that there is sufficient space on the stack. */
3056 /** @todo Factor out segment limit checking for normal/expand down segments
3057 * into a separate function. */
3058 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3059 {
3060 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3061 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3062 {
3063 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3064 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3065 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3066 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3067 }
3068 }
3069 else
3070 {
3071 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3072 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3073 {
3074 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3075 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3076 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3077 }
3078 }
3079
3080
3081 if (fIsNewTss386)
3082 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3083 else
3084 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3085 if (rcStrict != VINF_SUCCESS)
3086 {
3087 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3088 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3089 return rcStrict;
3090 }
3091 }
3092
3093 /* Check the new EIP against the new CS limit. */
3094 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3095 {
3096 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3097 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3098 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3099 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3100 }
3101
3102 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3103 pVCpu->cpum.GstCtx.ss.Sel));
3104 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3105}
3106
3107
3108/**
3109 * Implements exceptions and interrupts for protected mode.
3110 *
3111 * @returns VBox strict status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param cbInstr The number of bytes to offset rIP by in the return
3114 * address.
3115 * @param u8Vector The interrupt / exception vector number.
3116 * @param fFlags The flags.
3117 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3118 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3119 */
3120static VBOXSTRICTRC
3121iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3122 uint8_t cbInstr,
3123 uint8_t u8Vector,
3124 uint32_t fFlags,
3125 uint16_t uErr,
3126 uint64_t uCr2) RT_NOEXCEPT
3127{
3128 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3129
3130 /*
3131 * Read the IDT entry.
3132 */
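    /* Protected-mode IDT entries are 8 bytes each; the IDT limit must cover the whole entry for this vector. */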
3133 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3134 {
3135 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3136 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3137 }
3138 X86DESC Idte;
3139 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3140 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3141 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3142 {
3143 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3144 return rcStrict;
3145 }
3146 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3147 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3148 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3149 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3150
3151 /*
3152 * Check the descriptor type, DPL and such.
3153 * ASSUMES this is done in the same order as described for call-gate calls.
3154 */
3155 if (Idte.Gate.u1DescType)
3156 {
3157 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3158 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3159 }
3160 bool fTaskGate = false;
3161 uint8_t f32BitGate = true;
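    /* Note: f32BitGate also serves as a shift count (0 or 1) when sizing the 16-bit vs. 32-bit stack frames further down. */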
3162 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3163 switch (Idte.Gate.u4Type)
3164 {
3165 case X86_SEL_TYPE_SYS_UNDEFINED:
3166 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3167 case X86_SEL_TYPE_SYS_LDT:
3168 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3169 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3170 case X86_SEL_TYPE_SYS_UNDEFINED2:
3171 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3172 case X86_SEL_TYPE_SYS_UNDEFINED3:
3173 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3174 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3175 case X86_SEL_TYPE_SYS_UNDEFINED4:
3176 {
3177 /** @todo check what actually happens when the type is wrong...
3178 * esp. call gates. */
3179 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3180 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3181 }
3182
3183 case X86_SEL_TYPE_SYS_286_INT_GATE:
3184 f32BitGate = false;
3185 RT_FALL_THRU();
3186 case X86_SEL_TYPE_SYS_386_INT_GATE:
3187 fEflToClear |= X86_EFL_IF;
3188 break;
3189
3190 case X86_SEL_TYPE_SYS_TASK_GATE:
3191 fTaskGate = true;
3192#ifndef IEM_IMPLEMENTS_TASKSWITCH
3193 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3194#endif
3195 break;
3196
3197 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3198 f32BitGate = false;
3199 break;
3200 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3201 break;
3202
3203 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3204 }
3205
3206 /* Check DPL against CPL if applicable. */
3207 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3208 {
3209 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3210 {
3211 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3212 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3213 }
3214 }
3215
3216 /* Is it there? */
3217 if (!Idte.Gate.u1Present)
3218 {
3219 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3220 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3221 }
3222
3223 /* Is it a task-gate? */
3224 if (fTaskGate)
3225 {
3226 /*
3227 * Construct the error code masks based on what caused this task switch.
3228 * See Intel Instruction reference for INT.
3229 */
3230 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3231 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3232 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3233 RTSEL SelTss = Idte.Gate.u16Sel;
3234
3235 /*
3236 * Fetch the TSS descriptor in the GDT.
3237 */
3238 IEMSELDESC DescTSS;
3239 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3240 if (rcStrict != VINF_SUCCESS)
3241 {
3242 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3243 VBOXSTRICTRC_VAL(rcStrict)));
3244 return rcStrict;
3245 }
3246
3247 /* The TSS descriptor must be a system segment and be available (not busy). */
3248 if ( DescTSS.Legacy.Gen.u1DescType
3249 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3250 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3251 {
3252 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3253 u8Vector, SelTss, DescTSS.Legacy.au64));
3254 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3255 }
3256
3257 /* The TSS must be present. */
3258 if (!DescTSS.Legacy.Gen.u1Present)
3259 {
3260 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3261 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3262 }
3263
3264 /* Do the actual task switch. */
3265 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3266 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3267 fFlags, uErr, uCr2, SelTss, &DescTSS);
3268 }
3269
3270 /* A null CS is bad. */
3271 RTSEL NewCS = Idte.Gate.u16Sel;
3272 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3273 {
3274 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3275 return iemRaiseGeneralProtectionFault0(pVCpu);
3276 }
3277
3278 /* Fetch the descriptor for the new CS. */
3279 IEMSELDESC DescCS;
3280 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3281 if (rcStrict != VINF_SUCCESS)
3282 {
3283 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3284 return rcStrict;
3285 }
3286
3287 /* Must be a code segment. */
3288 if (!DescCS.Legacy.Gen.u1DescType)
3289 {
3290 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3291 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3292 }
3293 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3294 {
3295 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3296 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3297 }
3298
3299 /* Don't allow lowering the privilege level. */
3300 /** @todo Does the lowering of privileges apply to software interrupts
3301 * only? This has bearings on the more-privileged or
3302 * same-privilege stack behavior further down. A testcase would
3303 * be nice. */
3304 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3305 {
3306 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3307 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3308 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3309 }
3310
3311 /* Make sure the selector is present. */
3312 if (!DescCS.Legacy.Gen.u1Present)
3313 {
3314 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3315 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3316 }
3317
3318#ifdef LOG_ENABLED
3319 /* If software interrupt, try to decode it if logging is enabled and such. */
3320 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3321 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3322 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3323#endif
3324
3325 /* Check the new EIP against the new CS limit. */
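    /* 286 interrupt/trap gates only carry a 16-bit offset; 386 gates combine the low and high halves into a full 32-bit EIP. */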
3326 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3327 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3328 ? Idte.Gate.u16OffsetLow
3329 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3330 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3331 if (uNewEip > cbLimitCS)
3332 {
3333 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3334 u8Vector, uNewEip, cbLimitCS, NewCS));
3335 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3336 }
3337 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3338
3339 /* Calc the flag image to push. */
3340 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3341 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3342 fEfl &= ~X86_EFL_RF;
3343 else
3344 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3345
3346 /* From V8086 mode only go to CPL 0. */
3347 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3348 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
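    /* A conforming target CS keeps the current CPL; a non-conforming one runs the handler at its CS.DPL. */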
3349 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3350 {
3351 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3352 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3353 }
3354
3355 /*
3356 * If the privilege level changes, we need to get a new stack from the TSS.
3357 * This in turns means validating the new SS and ESP...
3358 */
3359 if (uNewCpl != IEM_GET_CPL(pVCpu))
3360 {
3361 RTSEL NewSS;
3362 uint32_t uNewEsp;
3363 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3364 if (rcStrict != VINF_SUCCESS)
3365 return rcStrict;
3366
3367 IEMSELDESC DescSS;
3368 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3369 if (rcStrict != VINF_SUCCESS)
3370 return rcStrict;
3371 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3372 if (!DescSS.Legacy.Gen.u1DefBig)
3373 {
3374 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3375 uNewEsp = (uint16_t)uNewEsp;
3376 }
3377
3378 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3379
3380 /* Check that there is sufficient space for the stack frame. */
3381 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
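        /* Frame: (E)IP, CS, (E)FLAGS, (E)SP, SS - 5 entries, plus 1 for an error code; a V8086 source adds ES, DS, FS and GS.
           Entries are 2 bytes for a 16-bit gate and are doubled via f32BitGate for a 32-bit one. */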
3382 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3383 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3384 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3385
3386 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3387 {
3388 if ( uNewEsp - 1 > cbLimitSS
3389 || uNewEsp < cbStackFrame)
3390 {
3391 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3392 u8Vector, NewSS, uNewEsp, cbStackFrame));
3393 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3394 }
3395 }
3396 else
3397 {
3398 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3399 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3400 {
3401 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3402 u8Vector, NewSS, uNewEsp, cbStackFrame));
3403 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3404 }
3405 }
3406
3407 /*
3408 * Start making changes.
3409 */
3410
3411 /* Set the new CPL so that stack accesses use it. */
3412 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3413 IEM_SET_CPL(pVCpu, uNewCpl);
3414
3415 /* Create the stack frame. */
3416 uint8_t bUnmapInfoStackFrame;
3417 RTPTRUNION uStackFrame;
3418 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3419 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3420 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3421 if (rcStrict != VINF_SUCCESS)
3422 return rcStrict;
3423 if (f32BitGate)
3424 {
3425 if (fFlags & IEM_XCPT_FLAGS_ERR)
3426 *uStackFrame.pu32++ = uErr;
3427 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3428 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3429 uStackFrame.pu32[2] = fEfl;
3430 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3431 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3432 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3433 if (fEfl & X86_EFL_VM)
3434 {
3435 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3436 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3437 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3438 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3439 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3440 }
3441 }
3442 else
3443 {
3444 if (fFlags & IEM_XCPT_FLAGS_ERR)
3445 *uStackFrame.pu16++ = uErr;
3446 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3447 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3448 uStackFrame.pu16[2] = fEfl;
3449 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3450 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3451 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3452 if (fEfl & X86_EFL_VM)
3453 {
3454 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3455 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3456 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3457 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3458 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3459 }
3460 }
3461 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3462 if (rcStrict != VINF_SUCCESS)
3463 return rcStrict;
3464
3465 /* Mark the selectors 'accessed' (hope this is the correct time). */
3466 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3467 * after pushing the stack frame? (Write protect the gdt + stack to
3468 * find out.) */
3469 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3470 {
3471 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3472 if (rcStrict != VINF_SUCCESS)
3473 return rcStrict;
3474 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3475 }
3476
3477 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3478 {
3479 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3480 if (rcStrict != VINF_SUCCESS)
3481 return rcStrict;
3482 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3483 }
3484
3485 /*
3486 * Start committing the register changes (joins with the DPL=CPL branch).
3487 */
3488 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3489 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3490 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3491 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3492 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3493 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3494 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3495 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3496 * SP is loaded).
3497 * Need to check the other combinations too:
3498 * - 16-bit TSS, 32-bit handler
3499 * - 32-bit TSS, 16-bit handler */
3500 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3501 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3502 else
3503 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3504
3505 if (fEfl & X86_EFL_VM)
3506 {
3507 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3508 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3509 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3510 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3511 }
3512 }
3513 /*
3514 * Same privilege, no stack change and smaller stack frame.
3515 */
3516 else
3517 {
3518 uint64_t uNewRsp;
3519 uint8_t bUnmapInfoStackFrame;
3520 RTPTRUNION uStackFrame;
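        /* Same-CPL frame: (E)IP, CS and (E)FLAGS only - 3 entries, plus 1 for an error code, doubled for a 32-bit gate. */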
3521 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3522 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3523 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3524 if (rcStrict != VINF_SUCCESS)
3525 return rcStrict;
3526
3527 if (f32BitGate)
3528 {
3529 if (fFlags & IEM_XCPT_FLAGS_ERR)
3530 *uStackFrame.pu32++ = uErr;
3531 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3532 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3533 uStackFrame.pu32[2] = fEfl;
3534 }
3535 else
3536 {
3537 if (fFlags & IEM_XCPT_FLAGS_ERR)
3538 *uStackFrame.pu16++ = uErr;
3539 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3540 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3541 uStackFrame.pu16[2] = fEfl;
3542 }
3543 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3544 if (rcStrict != VINF_SUCCESS)
3545 return rcStrict;
3546
3547 /* Mark the CS selector as 'accessed'. */
3548 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3549 {
3550 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3551 if (rcStrict != VINF_SUCCESS)
3552 return rcStrict;
3553 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3554 }
3555
3556 /*
3557 * Start committing the register changes (joins with the other branch).
3558 */
3559 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3560 }
3561
3562 /* ... register committing continues. */
3563 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3564 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3565 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3566 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3567 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3568 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3569
3570 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3571 fEfl &= ~fEflToClear;
3572 IEMMISC_SET_EFL(pVCpu, fEfl);
3573
3574 if (fFlags & IEM_XCPT_FLAGS_CR2)
3575 pVCpu->cpum.GstCtx.cr2 = uCr2;
3576
3577 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3578 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3579
3580 /* Make sure the execution flags are correct. */
3581 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3582 if (fExecNew != pVCpu->iem.s.fExec)
3583 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3584 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3585 pVCpu->iem.s.fExec = fExecNew;
3586 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3587
3588 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3589}
3590
3591
3592/**
3593 * Implements exceptions and interrupts for long mode.
3594 *
3595 * @returns VBox strict status code.
3596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3597 * @param cbInstr The number of bytes to offset rIP by in the return
3598 * address.
3599 * @param u8Vector The interrupt / exception vector number.
3600 * @param fFlags The flags.
3601 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3602 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3603 */
3604static VBOXSTRICTRC
3605iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3606 uint8_t cbInstr,
3607 uint8_t u8Vector,
3608 uint32_t fFlags,
3609 uint16_t uErr,
3610 uint64_t uCr2) RT_NOEXCEPT
3611{
3612 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3613
3614 /*
3615 * Read the IDT entry.
3616 */
3617 uint16_t offIdt = (uint16_t)u8Vector << 4;
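    /* Long-mode IDT entries are 16 bytes each, hence the shift by 4 and the two 8-byte fetches below. */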
3618 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3619 {
3620 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3621 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3622 }
3623 X86DESC64 Idte;
3624#ifdef _MSC_VER /* Shut up silly compiler warning. */
3625 Idte.au64[0] = 0;
3626 Idte.au64[1] = 0;
3627#endif
3628 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3629 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3630 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3631 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3632 {
3633 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3634 return rcStrict;
3635 }
3636 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3637 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3638 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3639
3640 /*
3641 * Check the descriptor type, DPL and such.
3642 * ASSUMES this is done in the same order as described for call-gate calls.
3643 */
3644 if (Idte.Gate.u1DescType)
3645 {
3646 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3647 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3648 }
3649 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3650 switch (Idte.Gate.u4Type)
3651 {
3652 case AMD64_SEL_TYPE_SYS_INT_GATE:
3653 fEflToClear |= X86_EFL_IF;
3654 break;
3655 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3656 break;
3657
3658 default:
3659 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3660 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3661 }
3662
3663 /* Check DPL against CPL if applicable. */
3664 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3665 {
3666 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3667 {
3668 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3669 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3670 }
3671 }
3672
3673 /* Is it there? */
3674 if (!Idte.Gate.u1Present)
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3677 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3678 }
3679
3680 /* A null CS is bad. */
3681 RTSEL NewCS = Idte.Gate.u16Sel;
3682 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3683 {
3684 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3685 return iemRaiseGeneralProtectionFault0(pVCpu);
3686 }
3687
3688 /* Fetch the descriptor for the new CS. */
3689 IEMSELDESC DescCS;
3690 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3691 if (rcStrict != VINF_SUCCESS)
3692 {
3693 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3694 return rcStrict;
3695 }
3696
3697 /* Must be a 64-bit code segment. */
3698 if (!DescCS.Long.Gen.u1DescType)
3699 {
3700 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3701 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3702 }
3703 if ( !DescCS.Long.Gen.u1Long
3704 || DescCS.Long.Gen.u1DefBig
3705 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3706 {
3707 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3708 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3709 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3710 }
3711
3712 /* Don't allow lowering the privilege level. For non-conforming CS
3713 selectors, the CS.DPL sets the privilege level the trap/interrupt
3714 handler runs at. For conforming CS selectors, the CPL remains
3715 unchanged, but the CS.DPL must be <= CPL. */
3716 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3717 * when CPU in Ring-0. Result \#GP? */
3718 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3719 {
3720 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3721 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3722 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3723 }
3724
3725
3726 /* Make sure the selector is present. */
3727 if (!DescCS.Legacy.Gen.u1Present)
3728 {
3729 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3730 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3731 }
3732
3733 /* Check that the new RIP is canonical. */
3734 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3735 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3736 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3737 if (!IEM_IS_CANONICAL(uNewRip))
3738 {
3739 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3740 return iemRaiseGeneralProtectionFault0(pVCpu);
3741 }
3742
3743 /*
3744 * If the privilege level changes or if the IST isn't zero, we need to get
3745 * a new stack from the TSS.
3746 */
3747 uint64_t uNewRsp;
3748 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3749 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3750 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3751 || Idte.Gate.u3IST != 0)
3752 {
3753 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3754 if (rcStrict != VINF_SUCCESS)
3755 return rcStrict;
3756 }
3757 else
3758 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3759 uNewRsp &= ~(uint64_t)0xf;
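    /* In long mode the new RSP is aligned down to a 16-byte boundary before the frame is pushed. */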
3760
3761 /*
3762 * Calc the flag image to push.
3763 */
3764 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3765 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3766 fEfl &= ~X86_EFL_RF;
3767 else
3768 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3769
3770 /*
3771 * Start making changes.
3772 */
3773 /* Set the new CPL so that stack accesses use it. */
3774 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3775 IEM_SET_CPL(pVCpu, uNewCpl);
3776/** @todo Setting CPL this early seems wrong as it would affect any errors we
3777 * raise while accessing the stack and (?) GDT/LDT... */
3778
3779 /* Create the stack frame. */
3780 uint8_t bUnmapInfoStackFrame;
3781 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
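    /* The 64-bit frame is always RIP, CS, RFLAGS, RSP and SS (5 qwords), plus one more qword when an error code is pushed. */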
3782 RTPTRUNION uStackFrame;
3783 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3784 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787
3788 if (fFlags & IEM_XCPT_FLAGS_ERR)
3789 *uStackFrame.pu64++ = uErr;
3790 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3791 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3792 uStackFrame.pu64[2] = fEfl;
3793 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3794 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3795 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3796 if (rcStrict != VINF_SUCCESS)
3797 return rcStrict;
3798
3799 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3800 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3801 * after pushing the stack frame? (Write protect the gdt + stack to
3802 * find out.) */
3803 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3804 {
3805 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3806 if (rcStrict != VINF_SUCCESS)
3807 return rcStrict;
3808 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3809 }
3810
3811 /*
3812 * Start committing the register changes.
3813 */
3814 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3815 * hidden registers when interrupting 32-bit or 16-bit code! */
3816 if (uNewCpl != uOldCpl)
3817 {
3818 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3819 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3820 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3821 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3822 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3823 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3824 }
3825 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3826 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3827 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3828 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3829 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3830 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3831 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3832 pVCpu->cpum.GstCtx.rip = uNewRip;
3833
3834 fEfl &= ~fEflToClear;
3835 IEMMISC_SET_EFL(pVCpu, fEfl);
3836
3837 if (fFlags & IEM_XCPT_FLAGS_CR2)
3838 pVCpu->cpum.GstCtx.cr2 = uCr2;
3839
3840 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3841 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3842
3843 iemRecalcExecModeAndCplFlags(pVCpu);
3844
3845 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3846}
3847
3848
3849/**
3850 * Implements exceptions and interrupts.
3851 *
3852 * All exceptions and interrupts go through this function!
3853 *
3854 * @returns VBox strict status code.
3855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3856 * @param cbInstr The number of bytes to offset rIP by in the return
3857 * address.
3858 * @param u8Vector The interrupt / exception vector number.
3859 * @param fFlags The flags.
3860 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3861 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3862 */
3863VBOXSTRICTRC
3864iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3865 uint8_t cbInstr,
3866 uint8_t u8Vector,
3867 uint32_t fFlags,
3868 uint16_t uErr,
3869 uint64_t uCr2) RT_NOEXCEPT
3870{
3871 /*
3872 * Get all the state that we might need here.
3873 */
3874 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3875 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3876
3877#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3878 /*
3879 * Flush prefetch buffer
3880 */
3881 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3882#endif
3883
3884 /*
3885 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3886 */
3887 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3888 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3889 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3890 | IEM_XCPT_FLAGS_BP_INSTR
3891 | IEM_XCPT_FLAGS_ICEBP_INSTR
3892 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3893 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3894 {
3895 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3896 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3897 u8Vector = X86_XCPT_GP;
3898 uErr = 0;
3899 }
3900
3901 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3902#ifdef DBGFTRACE_ENABLED
3903 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3904 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3905 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3906#endif
3907
3908 /*
3909 * Check if DBGF wants to intercept the exception.
3910 */
3911 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3912 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3913 { /* likely */ }
3914 else
3915 {
3916 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3917 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3918 if (rcStrict != VINF_SUCCESS)
3919 return rcStrict;
3920 }
3921
3922 /*
3923 * Evaluate whether NMI blocking should be in effect.
3924 * Normally, NMI blocking is in effect whenever we inject an NMI.
3925 */
3926 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3927 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3928
3929#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3930 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3931 {
3932 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3933 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3934 return rcStrict0;
3935
3936 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3937 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3938 {
3939 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3940 fBlockNmi = false;
3941 }
3942 }
3943#endif
3944
3945#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3946 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3947 {
3948 /*
3949 * If the event is being injected as part of VMRUN, it isn't subject to event
3950 * intercepts in the nested-guest. However, secondary exceptions that occur
3951 * during injection of any event -are- subject to exception intercepts.
3952 *
3953 * See AMD spec. 15.20 "Event Injection".
3954 */
3955 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3956 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3957 else
3958 {
3959 /*
3960 * Check and handle if the event being raised is intercepted.
3961 */
3962 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3963 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3964 return rcStrict0;
3965 }
3966 }
3967#endif
3968
3969 /*
3970 * Set NMI blocking if necessary.
3971 */
3972 if (fBlockNmi)
3973 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3974
3975 /*
3976 * Do recursion accounting.
3977 */
3978 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3979 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3980 if (pVCpu->iem.s.cXcptRecursions == 0)
3981 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3982 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3983 else
3984 {
3985 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3986 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3987 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3988
3989 if (pVCpu->iem.s.cXcptRecursions >= 4)
3990 {
3991#ifdef DEBUG_bird
3992 AssertFailed();
3993#endif
3994 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3995 }
3996
3997 /*
3998 * Evaluate the sequence of recurring events.
3999 */
4000 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4001 NULL /* pXcptRaiseInfo */);
4002 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4003 { /* likely */ }
4004 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4005 {
4006 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4007 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4008 u8Vector = X86_XCPT_DF;
4009 uErr = 0;
4010#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4011 /* VMX nested-guest #DF intercept needs to be checked here. */
4012 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4013 {
4014 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4015 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4016 return rcStrict0;
4017 }
4018#endif
4019 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4020 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4021 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4022 }
4023 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4024 {
4025 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4026 return iemInitiateCpuShutdown(pVCpu);
4027 }
4028 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4029 {
4030 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4031 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4032 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4033 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4034 return VERR_EM_GUEST_CPU_HANG;
4035 }
4036 else
4037 {
4038 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4039 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4040 return VERR_IEM_IPE_9;
4041 }
4042
4043 /*
4044 * The 'EXT' bit is set when an exception occurs during delivery of an external
4045 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4046 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4047 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
4048 *
4049 * [1] - Intel spec. 6.13 "Error Code"
4050 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4051 * [3] - Intel Instruction reference for INT n.
4052 */
4053 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4054 && (fFlags & IEM_XCPT_FLAGS_ERR)
4055 && u8Vector != X86_XCPT_PF
4056 && u8Vector != X86_XCPT_DF)
4057 {
4058 uErr |= X86_TRAP_ERR_EXTERNAL;
4059 }
4060 }
4061
4062 pVCpu->iem.s.cXcptRecursions++;
4063 pVCpu->iem.s.uCurXcpt = u8Vector;
4064 pVCpu->iem.s.fCurXcpt = fFlags;
4065 pVCpu->iem.s.uCurXcptErr = uErr;
4066 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4067
4068 /*
4069 * Extensive logging.
4070 */
4071#if defined(LOG_ENABLED) && defined(IN_RING3)
4072 if (LogIs3Enabled())
4073 {
4074 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4075 char szRegs[4096];
4076 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4077 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4078 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4079 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4080 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4081 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4082 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4083 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4084 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4085 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4086 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4087 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4088 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4089 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4090 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4091 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4092 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4093 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4094 " efer=%016VR{efer}\n"
4095 " pat=%016VR{pat}\n"
4096 " sf_mask=%016VR{sf_mask}\n"
4097 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4098 " lstar=%016VR{lstar}\n"
4099 " star=%016VR{star} cstar=%016VR{cstar}\n"
4100 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4101 );
4102
4103 char szInstr[256];
4104 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4105 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4106 szInstr, sizeof(szInstr), NULL);
4107 Log3(("%s%s\n", szRegs, szInstr));
4108 }
4109#endif /* LOG_ENABLED */
4110
4111 /*
4112 * Stats.
4113 */
4114 uint64_t const uTimestamp = ASMReadTSC();
4115 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4116 {
4117 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4118 EMHistoryAddExit(pVCpu,
4119 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4120 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4121 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4122 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4123 }
4124 else
4125 {
4126 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4127 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4128 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4129 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4130 if (fFlags & IEM_XCPT_FLAGS_ERR)
4131 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4132 if (fFlags & IEM_XCPT_FLAGS_CR2)
4133 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4134 }
4135
4136 /*
4137 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4138 * to ensure that a stale TLB or paging cache entry will only cause one
4139 * spurious #PF.
4140 */
4141 if ( u8Vector == X86_XCPT_PF
4142 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4143 IEMTlbInvalidatePage(pVCpu, uCr2);
4144
4145 /*
4146 * Call the mode specific worker function.
4147 */
4148 VBOXSTRICTRC rcStrict;
4149 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4150 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4151 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4152 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4153 else
4154 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4155
4156 /* Flush the prefetch buffer. */
4157 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4158
4159 /*
4160 * Unwind.
4161 */
4162 pVCpu->iem.s.cXcptRecursions--;
4163 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4164 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4165 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4166 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4167 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4168 return rcStrict;
4169}
4170
4171#ifdef IEM_WITH_SETJMP
4172/**
4173 * See iemRaiseXcptOrInt. Will not return.
4174 */
4175DECL_NO_RETURN(void)
4176iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4177 uint8_t cbInstr,
4178 uint8_t u8Vector,
4179 uint32_t fFlags,
4180 uint16_t uErr,
4181 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4182{
4183 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4184 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4185}
4186#endif
4187
4188
4189/** \#DE - 00. */
4190VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4191{
4192 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4193}
4194
4195
4196/** \#DB - 01.
4197 * @note This automatically clears DR7.GD. */
4198VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4199{
4200 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4201 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4202 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4203}
4204
4205
4206/** \#BR - 05. */
4207VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4208{
4209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4210}
4211
4212
4213/** \#UD - 06. */
4214VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4215{
4216 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4217}
4218
4219
4220#ifdef IEM_WITH_SETJMP
4221/** \#UD - 06. */
4222DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4223{
4224 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4225}
4226#endif
4227
4228
4229/** \#NM - 07. */
4230VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4233}
4234
4235
4236#ifdef IEM_WITH_SETJMP
4237/** \#NM - 07. */
4238DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4239{
4240 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4241}
4242#endif
4243
4244
4245/** \#TS(err) - 0a. */
4246VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4247{
4248 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4249}
4250
4251
4252/** \#TS(tr) - 0a. */
4253VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4254{
4255 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4256 pVCpu->cpum.GstCtx.tr.Sel, 0);
4257}
4258
4259
4260/** \#TS(0) - 0a. */
4261VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4262{
4263 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4264 0, 0);
4265}
4266
4267
4268/** \#TS(err) - 0a. */
4269VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4270{
4271 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4272 uSel & X86_SEL_MASK_OFF_RPL, 0);
4273}
4274
4275
4276/** \#NP(err) - 0b. */
4277VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4278{
4279 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4280}
4281
4282
4283/** \#NP(sel) - 0b. */
4284VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4285{
4286 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4287 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4288 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4289 uSel & ~X86_SEL_RPL, 0);
4290}
4291
4292
4293/** \#SS(seg) - 0c. */
4294VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4295{
4296 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4297 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4298 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4299 uSel & ~X86_SEL_RPL, 0);
4300}
4301
4302
4303/** \#SS(err) - 0c. */
4304VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4305{
4306 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4307 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4308 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4309}
4310
4311
4312/** \#GP(n) - 0d. */
4313VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4314{
4315 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4316 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4317}
4318
4319
4320/** \#GP(0) - 0d. */
4321VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4322{
4323 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4324 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4325}
4326
4327#ifdef IEM_WITH_SETJMP
4328/** \#GP(0) - 0d. */
4329DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4330{
4331 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4332 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4333}
4334#endif
4335
4336
4337/** \#GP(sel) - 0d. */
4338VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4339{
4340 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4341 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4342 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4343 Sel & ~X86_SEL_RPL, 0);
4344}
4345
4346
4347/** \#GP(0) - 0d. */
4348VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4349{
4350 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4351 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4352}
4353
4354
4355/** \#GP(sel) - 0d. */
4356VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4357{
4358 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4359 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4360 NOREF(iSegReg); NOREF(fAccess);
4361 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4362 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4363}
4364
4365#ifdef IEM_WITH_SETJMP
4366/** \#GP(sel) - 0d, longjmp. */
4367DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4368{
4369 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4370 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4371 NOREF(iSegReg); NOREF(fAccess);
4372 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4373 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4374}
4375#endif
4376
4377/** \#GP(sel) - 0d. */
4378VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4379{
4380 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4381 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4382 NOREF(Sel);
4383 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4384}
4385
4386#ifdef IEM_WITH_SETJMP
4387/** \#GP(sel) - 0d, longjmp. */
4388DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4389{
4390 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4391 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4392 NOREF(Sel);
4393 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4394}
4395#endif
4396
4397
4398/** \#GP(sel) - 0d. */
4399VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4400{
4401 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4402 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4403 NOREF(iSegReg); NOREF(fAccess);
4404 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4405}
4406
4407#ifdef IEM_WITH_SETJMP
4408/** \#GP(sel) - 0d, longjmp. */
4409DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4410{
4411 NOREF(iSegReg); NOREF(fAccess);
4412 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4413}
4414#endif
4415
4416
4417/** \#PF(n) - 0e. */
4418VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4419{
4420 uint16_t uErr;
4421 switch (rc)
4422 {
4423 case VERR_PAGE_NOT_PRESENT:
4424 case VERR_PAGE_TABLE_NOT_PRESENT:
4425 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4426 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4427 uErr = 0;
4428 break;
4429
4430 default:
4431 AssertMsgFailed(("%Rrc\n", rc));
4432 RT_FALL_THRU();
4433 case VERR_ACCESS_DENIED:
4434 uErr = X86_TRAP_PF_P;
4435 break;
4436
4437 /** @todo reserved */
4438 }
4439
4440 if (IEM_GET_CPL(pVCpu) == 3)
4441 uErr |= X86_TRAP_PF_US;
4442
4443 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4444 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4445 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4446 uErr |= X86_TRAP_PF_ID;
4447
4448#if 0 /* This is so much non-sense, really. Why was it done like that? */
4449 /* Note! RW access callers reporting a WRITE protection fault, will clear
4450 the READ flag before calling. So, read-modify-write accesses (RW)
4451 can safely be reported as READ faults. */
4452 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4453 uErr |= X86_TRAP_PF_RW;
4454#else
4455 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4456 {
4457 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4458 /// (regardless of outcome of the comparison in the latter case).
4459 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4460 uErr |= X86_TRAP_PF_RW;
4461 }
4462#endif
4463
4464 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4465 of the memory operand rather than at the start of it. (Not sure what
4466       happens if it crosses a page boundary.)  The current heuristic for
4467 this is to report the #PF for the last byte if the access is more than
4468 64 bytes. This is probably not correct, but we can work that out later,
4469 main objective now is to get FXSAVE to work like for real hardware and
4470 make bs3-cpu-basic2 work. */
4471 if (cbAccess <= 64)
4472 { /* likely*/ }
4473 else
4474 GCPtrWhere += cbAccess - 1;
4475
4476 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4477 uErr, GCPtrWhere);
4478}
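
/* Illustrative sketch, not part of the original file: a standalone restatement of how
   the #PF error code bits are composed above.  The helper name and its boolean
   parameters are hypothetical and exist purely for clarity. */
#if 0
static uint16_t iemSketchPfErrCd(bool fProtViolation, bool fWrite, bool fUserCpl3, bool fInstrFetchNx)
{
    uint16_t uErr = 0;
    if (fProtViolation) uErr |= X86_TRAP_PF_P;  /* page present, access rights violated */
    if (fWrite)         uErr |= X86_TRAP_PF_RW; /* write (incl. read-modify-write) access */
    if (fUserCpl3)      uErr |= X86_TRAP_PF_US; /* access made at CPL 3 */
    if (fInstrFetchNx)  uErr |= X86_TRAP_PF_ID; /* code fetch with CR4.PAE + EFER.NXE */
    return uErr;
}
#endif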
4479
4480#ifdef IEM_WITH_SETJMP
4481/** \#PF(n) - 0e, longjmp. */
4482DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4483 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4484{
4485 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4486}
4487#endif
4488
4489
4490/** \#MF(0) - 10. */
4491VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4492{
4493 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4494 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4495
4496 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4497 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4498 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4499}
4500
4501#ifdef IEM_WITH_SETJMP
4502/** \#MF(0) - 10, longjmp. */
4503DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4504{
4505 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4506}
4507#endif
4508
4509
4510/** \#AC(0) - 11. */
4511VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4512{
4513 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4514}
4515
4516#ifdef IEM_WITH_SETJMP
4517/** \#AC(0) - 11, longjmp. */
4518DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4519{
4520 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4521}
4522#endif
4523
4524
4525/** \#XF(0)/\#XM(0) - 19. */
4526VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4527{
4528 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4529}
4530
4531
4532/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4533IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4534{
4535 NOREF(cbInstr);
4536 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4537}
4538
4539
4540/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4541IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4542{
4543 NOREF(cbInstr);
4544 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4545}
4546
4547
4548/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4549IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4550{
4551 NOREF(cbInstr);
4552 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4553}
4554
4555
4556/** @} */
4557
4558/** @name Common opcode decoders.
4559 * @{
4560 */
4561//#include <iprt/mem.h>
4562
4563/**
4564 * Used to add extra details about a stub case.
4565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4566 */
4567void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4568{
4569#if defined(LOG_ENABLED) && defined(IN_RING3)
4570 PVM pVM = pVCpu->CTX_SUFF(pVM);
4571 char szRegs[4096];
4572 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4573 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4574 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4575 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4576 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4577 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4578 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4579 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4580 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4581 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4582 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4583 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4584 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4585 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4586 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4587 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4588 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4589 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4590 " efer=%016VR{efer}\n"
4591 " pat=%016VR{pat}\n"
4592 " sf_mask=%016VR{sf_mask}\n"
4593 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4594 " lstar=%016VR{lstar}\n"
4595 " star=%016VR{star} cstar=%016VR{cstar}\n"
4596 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4597 );
4598
4599 char szInstr[256];
4600 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4601 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4602 szInstr, sizeof(szInstr), NULL);
4603
4604 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4605#else
4606 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4607#endif
4608}
4609
4610/** @} */
4611
4612
4613
4614/** @name Register Access.
4615 * @{
4616 */
4617
4618/**
4619 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4620 *
4621 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4622 * segment limit.
4623 *
4624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4625 * @param cbInstr Instruction size.
4626 * @param offNextInstr The offset of the next instruction.
4627 * @param enmEffOpSize Effective operand size.
4628 */
4629VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4630 IEMMODE enmEffOpSize) RT_NOEXCEPT
4631{
4632 switch (enmEffOpSize)
4633 {
4634 case IEMMODE_16BIT:
4635 {
4636 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4637 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4638 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4639 pVCpu->cpum.GstCtx.rip = uNewIp;
4640 else
4641 return iemRaiseGeneralProtectionFault0(pVCpu);
4642 break;
4643 }
4644
4645 case IEMMODE_32BIT:
4646 {
4647 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4648 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4649
4650 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4651 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4652 pVCpu->cpum.GstCtx.rip = uNewEip;
4653 else
4654 return iemRaiseGeneralProtectionFault0(pVCpu);
4655 break;
4656 }
4657
4658 case IEMMODE_64BIT:
4659 {
4660 Assert(IEM_IS_64BIT_CODE(pVCpu));
4661
4662 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4663 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4664 pVCpu->cpum.GstCtx.rip = uNewRip;
4665 else
4666 return iemRaiseGeneralProtectionFault0(pVCpu);
4667 break;
4668 }
4669
4670 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4671 }
4672
4673#ifndef IEM_WITH_CODE_TLB
4674 /* Flush the prefetch buffer. */
4675 pVCpu->iem.s.cbOpcode = cbInstr;
4676#endif
4677
4678 /*
4679 * Clear RF and finish the instruction (maybe raise #DB).
4680 */
4681 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4682}
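
/* Illustrative sketch, not part of the original file: the 16-bit case above depends on
   uint16_t truncation so that IP wraps at 64K before the CS limit check.  The helper
   below is hypothetical and only restates that check in isolation. */
#if 0
static bool iemSketchIp16JumpOk(uint16_t uOldIp, uint8_t cbInstr, int8_t offRel, uint32_t cbCsLimit, uint16_t *puNewIp)
{
    uint16_t const uNewIp = (uint16_t)(uOldIp + cbInstr + (int16_t)offRel); /* wraps modulo 64K */
    if (uNewIp > cbCsLimit)
        return false; /* the caller would raise #GP(0) */
    *puNewIp = uNewIp;
    return true;
}
#endif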
4683
4684
4685/**
4686 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4687 *
4688 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4689 * segment limit.
4690 *
4691 * @returns Strict VBox status code.
4692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4693 * @param cbInstr Instruction size.
4694 * @param offNextInstr The offset of the next instruction.
4695 */
4696VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4697{
4698 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4699
4700 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4701 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4702 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4703 pVCpu->cpum.GstCtx.rip = uNewIp;
4704 else
4705 return iemRaiseGeneralProtectionFault0(pVCpu);
4706
4707#ifndef IEM_WITH_CODE_TLB
4708 /* Flush the prefetch buffer. */
4709 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4710#endif
4711
4712 /*
4713 * Clear RF and finish the instruction (maybe raise #DB).
4714 */
4715 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4716}
4717
4718
4719/**
4720 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4721 *
4722 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4723 * segment limit.
4724 *
4725 * @returns Strict VBox status code.
4726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4727 * @param cbInstr Instruction size.
4728 * @param offNextInstr The offset of the next instruction.
4729 * @param enmEffOpSize Effective operand size.
4730 */
4731VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4732 IEMMODE enmEffOpSize) RT_NOEXCEPT
4733{
4734 if (enmEffOpSize == IEMMODE_32BIT)
4735 {
4736 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4737
4738 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4739 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4740 pVCpu->cpum.GstCtx.rip = uNewEip;
4741 else
4742 return iemRaiseGeneralProtectionFault0(pVCpu);
4743 }
4744 else
4745 {
4746 Assert(enmEffOpSize == IEMMODE_64BIT);
4747
4748 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4749 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4750 pVCpu->cpum.GstCtx.rip = uNewRip;
4751 else
4752 return iemRaiseGeneralProtectionFault0(pVCpu);
4753 }
4754
4755#ifndef IEM_WITH_CODE_TLB
4756 /* Flush the prefetch buffer. */
4757 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4758#endif
4759
4760 /*
4761 * Clear RF and finish the instruction (maybe raise #DB).
4762 */
4763 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4764}
4765
4766/** @} */
4767
4768
4769/** @name FPU access and helpers.
4770 *
4771 * @{
4772 */
4773
4774/**
4775 * Updates the x87.DS and FPUDP registers.
4776 *
4777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4778 * @param pFpuCtx The FPU context.
4779 * @param iEffSeg The effective segment register.
4780 * @param GCPtrEff The effective address relative to @a iEffSeg.
4781 */
4782DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4783{
4784 RTSEL sel;
4785 switch (iEffSeg)
4786 {
4787 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4788 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4789 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4790 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4791 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4792 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4793 default:
4794 AssertMsgFailed(("%d\n", iEffSeg));
4795 sel = pVCpu->cpum.GstCtx.ds.Sel;
4796 }
4797    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4798 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4799 {
4800 pFpuCtx->DS = 0;
4801 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4802 }
4803 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4804 {
4805 pFpuCtx->DS = sel;
4806 pFpuCtx->FPUDP = GCPtrEff;
4807 }
4808 else
4809 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4810}
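
/* Illustrative sketch, not part of the original file: in real and V86 mode the data
   pointer saved above is a linear address, i.e. selector * 16 plus the effective
   offset, with DS stored as zero.  Hypothetical helper for clarity only. */
#if 0
static uint32_t iemSketchRealModeFpuDp(uint16_t uSel, uint32_t offEff)
{
    return offEff + ((uint32_t)uSel << 4); /* segment * 16 + offset */
}
#endif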
4811
4812
4813/**
4814 * Rotates the stack registers in the push direction.
4815 *
4816 * @param pFpuCtx The FPU context.
4817 * @remarks This is a complete waste of time, but fxsave stores the registers in
4818 * stack order.
4819 */
4820DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4821{
4822 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4823 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4824 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4825 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4826 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4827 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4828 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4829 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4830 pFpuCtx->aRegs[0].r80 = r80Tmp;
4831}
4832
4833
4834/**
4835 * Rotates the stack registers in the pop direction.
4836 *
4837 * @param pFpuCtx The FPU context.
4838 * @remarks This is a complete waste of time, but fxsave stores the registers in
4839 * stack order.
4840 */
4841DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4842{
4843 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4844 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4845 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4846 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4847 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4848 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4849 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4850 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4851 pFpuCtx->aRegs[7].r80 = r80Tmp;
4852}
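
/* Illustrative sketch, not part of the original file: ST(n) addresses physical register
   (TOP + n) mod 8, which is why the two rotation helpers above are equivalent to
   adjusting TOP.  The helper name is hypothetical; it mirrors the indexing used below. */
#if 0
static unsigned iemSketchStRegToPhysIdx(uint16_t fFsw, unsigned iStReg)
{
    return (X86_FSW_TOP_GET(fFsw) + iStReg) & X86_FSW_TOP_SMASK;
}
#endif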
4853
4854
4855/**
4856 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4857 * exception prevents it.
4858 *
4859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4860 * @param pResult The FPU operation result to push.
4861 * @param pFpuCtx The FPU context.
4862 */
4863static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4864{
4865 /* Update FSW and bail if there are pending exceptions afterwards. */
4866 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4867 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4868 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4869 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4870 {
4871        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4872 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4873 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4874 pFpuCtx->FSW = fFsw;
4875 return;
4876 }
4877
4878 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4879 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4880 {
4881 /* All is fine, push the actual value. */
4882 pFpuCtx->FTW |= RT_BIT(iNewTop);
4883 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4884 }
4885 else if (pFpuCtx->FCW & X86_FCW_IM)
4886 {
4887 /* Masked stack overflow, push QNaN. */
4888 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4889 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4890 }
4891 else
4892 {
4893 /* Raise stack overflow, don't push anything. */
4894 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4895 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4896 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4897 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4898 return;
4899 }
4900
4901 fFsw &= ~X86_FSW_TOP_MASK;
4902 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4903 pFpuCtx->FSW = fFsw;
4904
4905 iemFpuRotateStackPush(pFpuCtx);
4906 RT_NOREF(pVCpu);
4907}
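
/* Illustrative sketch, not part of the original file: a push decrements TOP by one
   (adding 7 modulo 8) and overflows when the abridged tag word already marks the new
   slot as in use.  The helper below is hypothetical and only isolates that test. */
#if 0
static bool iemSketchFpuPushWouldOverflow(uint16_t fFsw, uint16_t fFtw)
{
    uint16_t const iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
    return RT_BOOL(fFtw & RT_BIT(iNewTop));
}
#endif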
4908
4909
4910/**
4911 * Stores a result in a FPU register and updates the FSW and FTW.
4912 *
4913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4914 * @param pFpuCtx The FPU context.
4915 * @param pResult The result to store.
4916 * @param iStReg Which FPU register to store it in.
4917 */
4918static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4919{
4920 Assert(iStReg < 8);
4921 uint16_t fNewFsw = pFpuCtx->FSW;
4922 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4923 fNewFsw &= ~X86_FSW_C_MASK;
4924 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4925 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4926 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4927 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4928 pFpuCtx->FSW = fNewFsw;
4929 pFpuCtx->FTW |= RT_BIT(iReg);
4930 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4931 RT_NOREF(pVCpu);
4932}
4933
4934
4935/**
4936 * Only updates the FPU status word (FSW) with the result of the current
4937 * instruction.
4938 *
4939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4940 * @param pFpuCtx The FPU context.
4941 * @param u16FSW The FSW output of the current instruction.
4942 */
4943static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4944{
4945 uint16_t fNewFsw = pFpuCtx->FSW;
4946 fNewFsw &= ~X86_FSW_C_MASK;
4947 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4948 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4949        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4950 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4951 pFpuCtx->FSW = fNewFsw;
4952 RT_NOREF(pVCpu);
4953}
4954
4955
4956/**
4957 * Pops one item off the FPU stack if no pending exception prevents it.
4958 *
4959 * @param pFpuCtx The FPU context.
4960 */
4961static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4962{
4963 /* Check pending exceptions. */
4964 uint16_t uFSW = pFpuCtx->FSW;
4965 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4966 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4967 return;
4968
4969 /* TOP--. */
4970 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4971 uFSW &= ~X86_FSW_TOP_MASK;
4972 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4973 pFpuCtx->FSW = uFSW;
4974
4975 /* Mark the previous ST0 as empty. */
4976 iOldTop >>= X86_FSW_TOP_SHIFT;
4977 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4978
4979 /* Rotate the registers. */
4980 iemFpuRotateStackPop(pFpuCtx);
4981}
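
/* Illustrative sketch, not part of the original file: adding UINT16_C(9) << X86_FSW_TOP_SHIFT
   and masking, as done above, advances the 3-bit TOP field by one modulo 8.  The
   hypothetical helper below spells the same update out explicitly. */
#if 0
static uint16_t iemSketchFswAdvanceTopForPop(uint16_t fFsw)
{
    uint16_t const iNewTop = (X86_FSW_TOP_GET(fFsw) + 1) & X86_FSW_TOP_SMASK;
    return (fFsw & ~X86_FSW_TOP_MASK) | (iNewTop << X86_FSW_TOP_SHIFT);
}
#endif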
4982
4983
4984/**
4985 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4986 *
4987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4988 * @param pResult The FPU operation result to push.
4989 * @param uFpuOpcode The FPU opcode value.
4990 */
4991void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4992{
4993 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4994 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4995 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4996}
4997
4998
4999/**
5000 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5001 * and sets FPUDP and FPUDS.
5002 *
5003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5004 * @param pResult The FPU operation result to push.
5005 * @param iEffSeg The effective segment register.
5006 * @param GCPtrEff The effective address relative to @a iEffSeg.
5007 * @param uFpuOpcode The FPU opcode value.
5008 */
5009void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5010 uint16_t uFpuOpcode) RT_NOEXCEPT
5011{
5012 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5013 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5014 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5015 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5016}
5017
5018
5019/**
5020 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5021 * unless a pending exception prevents it.
5022 *
5023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5024 * @param pResult The FPU operation result to store and push.
5025 * @param uFpuOpcode The FPU opcode value.
5026 */
5027void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5028{
5029 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5030 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5031
5032 /* Update FSW and bail if there are pending exceptions afterwards. */
5033 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5034 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5035 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5036 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5037 {
5038 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5039 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5040 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5041 pFpuCtx->FSW = fFsw;
5042 return;
5043 }
5044
5045 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5046 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5047 {
5048 /* All is fine, push the actual value. */
5049 pFpuCtx->FTW |= RT_BIT(iNewTop);
5050 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5051 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5052 }
5053 else if (pFpuCtx->FCW & X86_FCW_IM)
5054 {
5055 /* Masked stack overflow, push QNaN. */
5056 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5057 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5058 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5059 }
5060 else
5061 {
5062 /* Raise stack overflow, don't push anything. */
5063 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5064 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5065 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5066 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5067 return;
5068 }
5069
5070 fFsw &= ~X86_FSW_TOP_MASK;
5071 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5072 pFpuCtx->FSW = fFsw;
5073
5074 iemFpuRotateStackPush(pFpuCtx);
5075}
5076
5077
5078/**
5079 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5080 * FOP.
5081 *
5082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5083 * @param pResult The result to store.
5084 * @param iStReg Which FPU register to store it in.
5085 * @param uFpuOpcode The FPU opcode value.
5086 */
5087void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5088{
5089 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5090 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5091 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5092}
5093
5094
5095/**
5096 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5097 * FOP, and then pops the stack.
5098 *
5099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5100 * @param pResult The result to store.
5101 * @param iStReg Which FPU register to store it in.
5102 * @param uFpuOpcode The FPU opcode value.
5103 */
5104void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5105{
5106 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5107 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5108 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5109 iemFpuMaybePopOne(pFpuCtx);
5110}
5111
5112
5113/**
5114 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5115 * FPUDP, and FPUDS.
5116 *
5117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5118 * @param pResult The result to store.
5119 * @param iStReg Which FPU register to store it in.
5120 * @param iEffSeg The effective memory operand selector register.
5121 * @param GCPtrEff The effective memory operand offset.
5122 * @param uFpuOpcode The FPU opcode value.
5123 */
5124void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5125 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5126{
5127 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5128 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5129 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5130 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5131}
5132
5133
5134/**
5135 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5136 * FPUDP, and FPUDS, and then pops the stack.
5137 *
5138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5139 * @param pResult The result to store.
5140 * @param iStReg Which FPU register to store it in.
5141 * @param iEffSeg The effective memory operand selector register.
5142 * @param GCPtrEff The effective memory operand offset.
5143 * @param uFpuOpcode The FPU opcode value.
5144 */
5145void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5146 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5147{
5148 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5149 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5150 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5151 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5152 iemFpuMaybePopOne(pFpuCtx);
5153}
5154
5155
5156/**
5157 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5158 *
5159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5160 * @param uFpuOpcode The FPU opcode value.
5161 */
5162void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5163{
5164 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5165 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5166}
5167
5168
5169/**
5170 * Updates the FSW, FOP, FPUIP, and FPUCS.
5171 *
5172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5173 * @param u16FSW The FSW from the current instruction.
5174 * @param uFpuOpcode The FPU opcode value.
5175 */
5176void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5177{
5178 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5179 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5180 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5181}
5182
5183
5184/**
5185 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5186 *
5187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5188 * @param u16FSW The FSW from the current instruction.
5189 * @param uFpuOpcode The FPU opcode value.
5190 */
5191void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5192{
5193 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5194 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5195 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5196 iemFpuMaybePopOne(pFpuCtx);
5197}
5198
5199
5200/**
5201 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5202 *
5203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5204 * @param u16FSW The FSW from the current instruction.
5205 * @param iEffSeg The effective memory operand selector register.
5206 * @param GCPtrEff The effective memory operand offset.
5207 * @param uFpuOpcode The FPU opcode value.
5208 */
5209void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5210{
5211 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5212 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5213 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5214 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5215}
5216
5217
5218/**
5219 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5220 *
5221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5222 * @param u16FSW The FSW from the current instruction.
5223 * @param uFpuOpcode The FPU opcode value.
5224 */
5225void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5226{
5227 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5228 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5229 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5230 iemFpuMaybePopOne(pFpuCtx);
5231 iemFpuMaybePopOne(pFpuCtx);
5232}
5233
5234
5235/**
5236 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5237 *
5238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5239 * @param u16FSW The FSW from the current instruction.
5240 * @param iEffSeg The effective memory operand selector register.
5241 * @param GCPtrEff The effective memory operand offset.
5242 * @param uFpuOpcode The FPU opcode value.
5243 */
5244void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5245 uint16_t uFpuOpcode) RT_NOEXCEPT
5246{
5247 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5248 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5249 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5250 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5251 iemFpuMaybePopOne(pFpuCtx);
5252}
5253
5254
5255/**
5256 * Worker routine for raising an FPU stack underflow exception.
5257 *
5258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5259 * @param pFpuCtx The FPU context.
5260 * @param iStReg The stack register being accessed.
5261 */
5262static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5263{
5264 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5265 if (pFpuCtx->FCW & X86_FCW_IM)
5266 {
5267 /* Masked underflow. */
5268 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5269 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5270 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5271 if (iStReg != UINT8_MAX)
5272 {
5273 pFpuCtx->FTW |= RT_BIT(iReg);
5274 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5275 }
5276 }
5277 else
5278 {
5279 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5280 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5281 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5282 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5283 }
5284 RT_NOREF(pVCpu);
5285}
5286
5287
5288/**
5289 * Raises a FPU stack underflow exception.
5290 *
5291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5292 * @param iStReg The destination register that should be loaded
5293 *                      with QNaN if \#IS is masked.  Specify
5294 * UINT8_MAX if none (like for fcom).
5295 * @param uFpuOpcode The FPU opcode value.
5296 */
5297void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5298{
5299 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5300 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5301 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5302}
5303
5304
5305void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5306{
5307 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5308 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5309 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5310 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5311}
5312
5313
5314void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5315{
5316 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5317 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5318 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5319 iemFpuMaybePopOne(pFpuCtx);
5320}
5321
5322
5323void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5324 uint16_t uFpuOpcode) RT_NOEXCEPT
5325{
5326 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5327 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5328 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5329 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5330 iemFpuMaybePopOne(pFpuCtx);
5331}
5332
5333
5334void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5335{
5336 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5337 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5338 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5339 iemFpuMaybePopOne(pFpuCtx);
5340 iemFpuMaybePopOne(pFpuCtx);
5341}
5342
5343
5344void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5345{
5346 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5347 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5348
5349 if (pFpuCtx->FCW & X86_FCW_IM)
5350 {
5351        /* Masked underflow - Push QNaN. */
5352 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5353 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5354 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5355 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5356 pFpuCtx->FTW |= RT_BIT(iNewTop);
5357 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5358 iemFpuRotateStackPush(pFpuCtx);
5359 }
5360 else
5361 {
5362 /* Exception pending - don't change TOP or the register stack. */
5363 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5364 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5365 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5366 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5367 }
5368}
5369
5370
5371void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5372{
5373 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5374 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5375
5376 if (pFpuCtx->FCW & X86_FCW_IM)
5377 {
5378        /* Masked underflow - Push QNaN. */
5379 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5380 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5381 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5382 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5383 pFpuCtx->FTW |= RT_BIT(iNewTop);
5384 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5385 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5386 iemFpuRotateStackPush(pFpuCtx);
5387 }
5388 else
5389 {
5390 /* Exception pending - don't change TOP or the register stack. */
5391 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5392 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5393 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5394 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5395 }
5396}
5397
5398
5399/**
5400 * Worker routine for raising an FPU stack overflow exception on a push.
5401 *
5402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5403 * @param pFpuCtx The FPU context.
5404 */
5405static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5406{
5407 if (pFpuCtx->FCW & X86_FCW_IM)
5408 {
5409 /* Masked overflow. */
5410 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5411 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5412 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5413 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5414 pFpuCtx->FTW |= RT_BIT(iNewTop);
5415 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5416 iemFpuRotateStackPush(pFpuCtx);
5417 }
5418 else
5419 {
5420 /* Exception pending - don't change TOP or the register stack. */
5421 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5422 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5423 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5424 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5425 }
5426 RT_NOREF(pVCpu);
5427}
5428
5429
5430/**
5431 * Raises a FPU stack overflow exception on a push.
5432 *
5433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5434 * @param uFpuOpcode The FPU opcode value.
5435 */
5436void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5437{
5438 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5439 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5440 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5441}
5442
5443
5444/**
5445 * Raises a FPU stack overflow exception on a push with a memory operand.
5446 *
5447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5448 * @param iEffSeg The effective memory operand selector register.
5449 * @param GCPtrEff The effective memory operand offset.
5450 * @param uFpuOpcode The FPU opcode value.
5451 */
5452void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5453{
5454 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5455 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5456 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5457 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5458}
5459
5460/** @} */
5461
5462
5463/** @name SSE+AVX SIMD access and helpers.
5464 *
5465 * @{
5466 */
5467/**
5468 * Stores a result in a SIMD XMM register, updates the MXCSR.
5469 *
5470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5471 * @param pResult The result to store.
5472 * @param iXmmReg Which SIMD XMM register to store the result in.
5473 */
5474void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5475{
5476 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5477 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5478
5479 /* The result is only updated if there is no unmasked exception pending. */
5480 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5481 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5482 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5483}
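
/* Illustrative sketch, not part of the original file: the guard above keeps the result
   only when no exception flag is set whose corresponding mask bit is clear.  The
   hypothetical helper below restates that MXCSR test on its own. */
#if 0
static bool iemSketchMxcsrHasUnmaskedXcpt(uint32_t fMxcsr)
{
    uint32_t const fFlags = fMxcsr & X86_MXCSR_XCPT_FLAGS;
    uint32_t const fMasks = (fMxcsr & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT;
    return (fFlags & ~fMasks) != 0;
}
#endif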
5484
5485
5486/**
5487 * Updates the MXCSR.
5488 *
5489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5490 * @param fMxcsr The new MXCSR value.
5491 */
5492void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5493{
5494 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5495 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5496}
5497/** @} */
5498
5499
5500/** @name Memory access.
5501 *
5502 * @{
5503 */
5504
5505#undef LOG_GROUP
5506#define LOG_GROUP LOG_GROUP_IEM_MEM
5507
5508/**
5509 * Updates the IEMCPU::cbWritten counter if applicable.
5510 *
5511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5512 * @param fAccess The access being accounted for.
5513 * @param cbMem The access size.
5514 */
5515DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5516{
5517 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5518 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5519 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5520}
5521
5522
5523/**
5524 * Applies the segment limit, base and attributes.
5525 *
5526 * This may raise a \#GP or \#SS.
5527 *
5528 * @returns VBox strict status code.
5529 *
5530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5531 * @param fAccess The kind of access which is being performed.
5532 * @param iSegReg The index of the segment register to apply.
5533 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5534 * TSS, ++).
5535 * @param cbMem The access size.
5536 * @param pGCPtrMem Pointer to the guest memory address to apply
5537 * segmentation to. Input and output parameter.
5538 */
5539VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5540{
5541 if (iSegReg == UINT8_MAX)
5542 return VINF_SUCCESS;
5543
5544 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5545 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5546 switch (IEM_GET_CPU_MODE(pVCpu))
5547 {
5548 case IEMMODE_16BIT:
5549 case IEMMODE_32BIT:
5550 {
5551 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5552 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5553
5554 if ( pSel->Attr.n.u1Present
5555 && !pSel->Attr.n.u1Unusable)
5556 {
5557 Assert(pSel->Attr.n.u1DescType);
5558 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5559 {
5560 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5561 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5562 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5563
5564 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5565 {
5566 /** @todo CPL check. */
5567 }
5568
5569 /*
5570 * There are two kinds of data selectors, normal and expand down.
5571 */
5572 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5573 {
5574 if ( GCPtrFirst32 > pSel->u32Limit
5575 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5576 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5577 }
5578 else
5579 {
5580 /*
5581 * The upper boundary is defined by the B bit, not the G bit!
5582 */
5583 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5584 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5585 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5586 }
5587 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5588 }
5589 else
5590 {
5591 /*
5592                 * A code selector can usually be used to read through; writing is
5593                 * only permitted in real and V8086 mode.
5594 */
5595 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5596 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5597 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5598 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5599 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5600
5601 if ( GCPtrFirst32 > pSel->u32Limit
5602 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5603 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5604
5605 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5606 {
5607 /** @todo CPL check. */
5608 }
5609
5610 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5611 }
5612 }
5613 else
5614 return iemRaiseGeneralProtectionFault0(pVCpu);
5615 return VINF_SUCCESS;
5616 }
5617
5618 case IEMMODE_64BIT:
5619 {
5620 RTGCPTR GCPtrMem = *pGCPtrMem;
5621 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5622 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5623
5624 Assert(cbMem >= 1);
5625 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5626 return VINF_SUCCESS;
5627 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5628 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5629 return iemRaiseGeneralProtectionFault0(pVCpu);
5630 }
5631
5632 default:
5633 AssertFailedReturn(VERR_IEM_IPE_7);
5634 }
5635}
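
/* Illustrative sketch, not part of the original file: the 16/32-bit data segment limit
   checks above distinguish normal from expand-down segments.  The helper below is
   hypothetical and restates both checks; offFirst/offLast are the first and last byte
   offsets of the access. */
#if 0
static bool iemSketchDataSegLimitOk(bool fExpandDown, bool fDefBig, uint32_t uLimit, uint32_t offFirst, uint32_t offLast)
{
    if (!fExpandDown)
        return offFirst <= uLimit && offLast <= uLimit;              /* both ends within the limit */
    uint32_t const uUpper = fDefBig ? UINT32_MAX : UINT32_C(0xffff); /* the B bit, not G, sets the top */
    return offFirst > uLimit && offLast <= uUpper;                   /* valid range is (limit, upper] */
}
#endif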
5636
5637
5638/**
5639 * Translates a virtual address to a physical address and checks if we
5640 * can access the page as specified.
5641 *
5642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5643 * @param GCPtrMem The virtual address.
5644 * @param cbAccess The access size, for raising \#PF correctly for
5645 * FXSAVE and such.
5646 * @param fAccess The intended access.
5647 * @param pGCPhysMem Where to return the physical address.
5648 */
5649VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5650 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5651{
5652 /** @todo Need a different PGM interface here. We're currently using
5653     *        generic / REM interfaces.  This won't cut it for R0. */
5654 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5655 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5656 * here. */
5657 PGMPTWALK Walk;
5658 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5659 if (RT_FAILURE(rc))
5660 {
5661 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5662 /** @todo Check unassigned memory in unpaged mode. */
5663 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5664#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5665 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5666 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5667#endif
5668 *pGCPhysMem = NIL_RTGCPHYS;
5669 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5670 }
5671
5672 /* If the page is writable and does not have the no-exec bit set, all
5673 access is allowed. Otherwise we'll have to check more carefully... */
5674 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5675 {
5676 /* Write to read only memory? */
5677 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5678 && !(Walk.fEffective & X86_PTE_RW)
5679 && ( ( IEM_GET_CPL(pVCpu) == 3
5680 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5681 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5682 {
5683 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5684 *pGCPhysMem = NIL_RTGCPHYS;
5685#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5686 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5687 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5688#endif
5689 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5690 }
5691
5692 /* Kernel memory accessed by userland? */
5693 if ( !(Walk.fEffective & X86_PTE_US)
5694 && IEM_GET_CPL(pVCpu) == 3
5695 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5696 {
5697 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5698 *pGCPhysMem = NIL_RTGCPHYS;
5699#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5700 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5701 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5702#endif
5703 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5704 }
5705
5706 /* Executing non-executable memory? */
5707 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5708 && (Walk.fEffective & X86_PTE_PAE_NX)
5709 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5710 {
5711 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5712 *pGCPhysMem = NIL_RTGCPHYS;
5713#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5714 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5715 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5716#endif
5717 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5718 VERR_ACCESS_DENIED);
5719 }
5720 }
5721
5722 /*
5723 * Set the dirty / access flags.
5724     * ASSUMES this is set when the address is translated rather than on commit...
5725 */
5726 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5727 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5728 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5729 {
5730 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5731 AssertRC(rc2);
5732 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5733 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5734 }
5735
5736 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5737 *pGCPhysMem = GCPhys;
5738 return VINF_SUCCESS;
5739}
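
/* Illustrative sketch, not part of the original file: a condensed restatement of the
   page permission checks above (read-only, supervisor and no-execute pages).  The
   helper and its parameters are hypothetical; fPte stands for the effective PTE bits
   and fUserAccess for a CPL-3, non-system access. */
#if 0
static bool iemSketchPagePermOk(uint64_t fPte, bool fWrite, bool fExec, bool fUserAccess, bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !(fPte & X86_PTE_RW) && (fUserAccess || fCr0Wp))
        return false; /* write to a read-only page */
    if (fUserAccess && !(fPte & X86_PTE_US))
        return false; /* user-mode access to a supervisor page */
    if (fExec && (fPte & X86_PTE_PAE_NX) && fEferNxe)
        return false; /* instruction fetch from a no-execute page */
    return true;
}
#endif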
5740
5741#if 0 /*unused*/
5742/**
5743 * Looks up a memory mapping entry.
5744 *
5745 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5747 * @param pvMem The memory address.
5748 * @param fAccess The access to.
5749 */
5750DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5751{
5752 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5753 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5754 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5755 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5756 return 0;
5757 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5758 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5759 return 1;
5760 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5761 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5762 return 2;
5763 return VERR_NOT_FOUND;
5764}
5765#endif
5766
5767/**
5768 * Finds a free memmap entry when using iNextMapping doesn't work.
5769 *
5770 * @returns Memory mapping index, 1024 on failure.
5771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5772 */
5773static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5774{
5775 /*
5776 * The easy case.
5777 */
5778 if (pVCpu->iem.s.cActiveMappings == 0)
5779 {
5780 pVCpu->iem.s.iNextMapping = 1;
5781 return 0;
5782 }
5783
5784 /* There should be enough mappings for all instructions. */
5785 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5786
5787 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5788 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5789 return i;
5790
5791 AssertFailedReturn(1024);
5792}
5793
5794
5795/**
5796 * Commits a bounce buffer that needs writing back and unmaps it.
5797 *
5798 * @returns Strict VBox status code.
5799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5800 * @param iMemMap The index of the buffer to commit.
5801 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5802 * Always false in ring-3, obviously.
5803 */
5804static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5805{
5806 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5807 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5808#ifdef IN_RING3
5809 Assert(!fPostponeFail);
5810 RT_NOREF_PV(fPostponeFail);
5811#endif
5812
5813 /*
5814 * Do the writing.
5815 */
5816 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5817 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5818 {
5819 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5820 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5821 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5822 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5823 {
5824 /*
5825 * Carefully and efficiently dealing with access handler return
5826             * codes makes this a little bloated.
5827 */
5828 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5829 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5830 pbBuf,
5831 cbFirst,
5832 PGMACCESSORIGIN_IEM);
5833 if (rcStrict == VINF_SUCCESS)
5834 {
5835 if (cbSecond)
5836 {
5837 rcStrict = PGMPhysWrite(pVM,
5838 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5839 pbBuf + cbFirst,
5840 cbSecond,
5841 PGMACCESSORIGIN_IEM);
5842 if (rcStrict == VINF_SUCCESS)
5843 { /* nothing */ }
5844 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5845 {
5846 LogEx(LOG_GROUP_IEM,
5847 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5849 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5850 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5851 }
5852#ifndef IN_RING3
5853 else if (fPostponeFail)
5854 {
5855 LogEx(LOG_GROUP_IEM,
5856 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5858 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5859 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5860 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5861 return iemSetPassUpStatus(pVCpu, rcStrict);
5862 }
5863#endif
5864 else
5865 {
5866 LogEx(LOG_GROUP_IEM,
5867 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5868 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5869 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5870 return rcStrict;
5871 }
5872 }
5873 }
5874 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5875 {
5876 if (!cbSecond)
5877 {
5878 LogEx(LOG_GROUP_IEM,
5879 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5880 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5881 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5882 }
5883 else
5884 {
5885 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5886 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5887 pbBuf + cbFirst,
5888 cbSecond,
5889 PGMACCESSORIGIN_IEM);
5890 if (rcStrict2 == VINF_SUCCESS)
5891 {
5892 LogEx(LOG_GROUP_IEM,
5893 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5894 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5896 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5897 }
5898 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5899 {
5900 LogEx(LOG_GROUP_IEM,
5901 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5902 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5903 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5904 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5905 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5906 }
5907#ifndef IN_RING3
5908 else if (fPostponeFail)
5909 {
5910 LogEx(LOG_GROUP_IEM,
5911 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5912 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5913 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5914 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5915 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5916 return iemSetPassUpStatus(pVCpu, rcStrict);
5917 }
5918#endif
5919 else
5920 {
5921 LogEx(LOG_GROUP_IEM,
5922 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5923 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5924 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5925 return rcStrict2;
5926 }
5927 }
5928 }
5929#ifndef IN_RING3
5930 else if (fPostponeFail)
5931 {
5932 LogEx(LOG_GROUP_IEM,
5933 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5934 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5935 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5936 if (!cbSecond)
5937 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5938 else
5939 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5940 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5941 return iemSetPassUpStatus(pVCpu, rcStrict);
5942 }
5943#endif
5944 else
5945 {
5946 LogEx(LOG_GROUP_IEM,
5947 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5948 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5949 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5950 return rcStrict;
5951 }
5952 }
5953 else
5954 {
5955 /*
5956 * No access handlers, much simpler.
5957 */
5958 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5959 if (RT_SUCCESS(rc))
5960 {
5961 if (cbSecond)
5962 {
5963 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5964 if (RT_SUCCESS(rc))
5965 { /* likely */ }
5966 else
5967 {
5968 LogEx(LOG_GROUP_IEM,
5969 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5970 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5971 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5972 return rc;
5973 }
5974 }
5975 }
5976 else
5977 {
5978 LogEx(LOG_GROUP_IEM,
5979 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5980 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5981 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5982 return rc;
5983 }
5984 }
5985 }
5986
5987#if defined(IEM_LOG_MEMORY_WRITES)
5988 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5989 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5990 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5991 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5992 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5993 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5994
5995 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5996 g_cbIemWrote = cbWrote;
5997 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5998#endif
5999
6000 /*
6001 * Free the mapping entry.
6002 */
6003 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6004 Assert(pVCpu->iem.s.cActiveMappings != 0);
6005 pVCpu->iem.s.cActiveMappings--;
6006 return VINF_SUCCESS;
6007}
6008
6009
6010/**
6011 * iemMemMap worker that deals with a request crossing pages.
6012 */
6013static VBOXSTRICTRC
6014iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6015 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6016{
6017 Assert(cbMem <= GUEST_PAGE_SIZE);
6018
6019 /*
6020 * Do the address translations.
6021 */
6022 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6023 RTGCPHYS GCPhysFirst;
6024 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6025 if (rcStrict != VINF_SUCCESS)
6026 return rcStrict;
6027 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6028
6029 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6030 RTGCPHYS GCPhysSecond;
6031 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6032 cbSecondPage, fAccess, &GCPhysSecond);
6033 if (rcStrict != VINF_SUCCESS)
6034 return rcStrict;
6035 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6036 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6037
6038 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6039
6040 /*
6041 * Read in the current memory content if it's a read, execute or partial
6042 * write access.
6043 */
6044 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6045
6046 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6047 {
6048 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6049 {
6050 /*
6051 * Must carefully deal with access handler status codes here,
6052 * makes the code a bit bloated.
6053 */
6054 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6055 if (rcStrict == VINF_SUCCESS)
6056 {
6057 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6058 if (rcStrict == VINF_SUCCESS)
6059 { /*likely */ }
6060 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6061 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6062 else
6063 {
6064                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6065 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6066 return rcStrict;
6067 }
6068 }
6069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6070 {
6071 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6072 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6073 {
6074 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6075 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6076 }
6077 else
6078 {
6079                    LogEx(LOG_GROUP_IEM,
6080                          ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6081                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6082 return rcStrict2;
6083 }
6084 }
6085 else
6086 {
6087                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6088 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6089 return rcStrict;
6090 }
6091 }
6092 else
6093 {
6094 /*
6095 * No informational status codes here, much more straight forward.
6096             * No informational status codes here, much more straightforward.
6097 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6098 if (RT_SUCCESS(rc))
6099 {
6100 Assert(rc == VINF_SUCCESS);
6101 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6102 if (RT_SUCCESS(rc))
6103 Assert(rc == VINF_SUCCESS);
6104 else
6105 {
6106 LogEx(LOG_GROUP_IEM,
6107                      ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6108 return rc;
6109 }
6110 }
6111 else
6112 {
6113 LogEx(LOG_GROUP_IEM,
6114                      ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6115 return rc;
6116 }
6117 }
6118 }
6119#ifdef VBOX_STRICT
6120 else
6121 memset(pbBuf, 0xcc, cbMem);
6122 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6123 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6124#endif
6125 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6126
6127 /*
6128 * Commit the bounce buffer entry.
6129 */
6130 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6131 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6132 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6133 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6134 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6135 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6136 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6137 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6138 pVCpu->iem.s.cActiveMappings++;
6139
6140 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6141 *ppvMem = pbBuf;
6142 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6143 return VINF_SUCCESS;
6144}
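
/*
 * Illustration only (not part of the build): how iemMemBounceBufferMapCrossPage
 * above splits a request.  The address and size are made-up example values and
 * GUEST_PAGE_SIZE is assumed to be 4 KiB for the arithmetic:
 *
 *      RTGCPTR  const GCPtrFirst   = 0x00007ffe;   // 4 byte access with only 2 bytes left in the page
 *      size_t   const cbMem        = 4;
 *      uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK); // = 2
 *      uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;                                     // = 2
 *
 * The first two bytes go to the start of the bounce buffer and the remaining
 * two to offset cbFirstPage, matching the GCPhysFirst/GCPhysSecond split that
 * iemMemBounceBufferCommitAndUnmap writes back later.
 */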
6145
6146
6147/**
6148 * iemMemMap worker that deals with iemMemPageMap failures.
6149 */
6150static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6151 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6152{
6153 /*
6154 * Filter out conditions we can handle and the ones which shouldn't happen.
6155 */
6156 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6157 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6158 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6159 {
6160 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6161 return rcMap;
6162 }
6163 pVCpu->iem.s.cPotentialExits++;
6164
6165 /*
6166 * Read in the current memory content if it's a read, execute or partial
6167 * write access.
6168 */
6169 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6170 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6171 {
6172 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6173 memset(pbBuf, 0xff, cbMem);
6174 else
6175 {
6176 int rc;
6177 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6178 {
6179 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6180 if (rcStrict == VINF_SUCCESS)
6181 { /* nothing */ }
6182 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6183 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6184 else
6185 {
6186 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6187 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6188 return rcStrict;
6189 }
6190 }
6191 else
6192 {
6193 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6194 if (RT_SUCCESS(rc))
6195 { /* likely */ }
6196 else
6197 {
6198                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6199 GCPhysFirst, rc));
6200 return rc;
6201 }
6202 }
6203 }
6204 }
6205#ifdef VBOX_STRICT
6206 else
6207 memset(pbBuf, 0xcc, cbMem);
6210 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6211 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6212#endif
6213
6214 /*
6215 * Commit the bounce buffer entry.
6216 */
6217 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6218 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6219 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6220 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6221 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6222 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6223 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6224 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6225 pVCpu->iem.s.cActiveMappings++;
6226
6227 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6228 *ppvMem = pbBuf;
6229 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6230 return VINF_SUCCESS;
6231}
6232
6233
6234
6235/**
6236 * Maps the specified guest memory for the given kind of access.
6237 *
6238 * This may be using bounce buffering of the memory if it's crossing a page
6239 * boundary or if there is an access handler installed for any of it. Because
6240 * of lock prefix guarantees, we're in for some extra clutter when this
6241 * happens.
6242 *
6243 * This may raise a \#GP, \#SS, \#PF or \#AC.
6244 *
6245 * @returns VBox strict status code.
6246 *
6247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6248 * @param ppvMem Where to return the pointer to the mapped memory.
6249 * @param pbUnmapInfo Where to return unmap info to be passed to
6250 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6251 * done.
6252 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6253 * 8, 12, 16, 32 or 512. When used by string operations
6254 * it can be up to a page.
6255 * @param iSegReg The index of the segment register to use for this
6256 * access. The base and limits are checked. Use UINT8_MAX
6257 * to indicate that no segmentation is required (for IDT,
6258 * GDT and LDT accesses).
6259 * @param GCPtrMem The address of the guest memory.
6260 * @param fAccess How the memory is being accessed. The
6261 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6262 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6263 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6264 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6265 * set.
6266 * @param uAlignCtl Alignment control:
6267 * - Bits 15:0 is the alignment mask.
6268 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6269 * IEM_MEMMAP_F_ALIGN_SSE, and
6270 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6271 * Pass zero to skip alignment.
6272 */
6273VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6274 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6275{
6276 /*
6277 * Check the input and figure out which mapping entry to use.
6278 */
6279 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6280 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6281 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6282 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6283 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6284
6285 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6286 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6287 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6288 {
6289 iMemMap = iemMemMapFindFree(pVCpu);
6290 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6291 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6292 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6293 pVCpu->iem.s.aMemMappings[2].fAccess),
6294 VERR_IEM_IPE_9);
6295 }
6296
6297 /*
6298 * Map the memory, checking that we can actually access it. If something
6299 * slightly complicated happens, fall back on bounce buffering.
6300 */
6301 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6302 if (rcStrict == VINF_SUCCESS)
6303 { /* likely */ }
6304 else
6305 return rcStrict;
6306
6307    if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Stays within a single page? */
6308 { /* likely */ }
6309 else
6310 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6311
6312 /*
6313 * Alignment check.
6314 */
6315 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6316 { /* likelyish */ }
6317 else
6318 {
6319 /* Misaligned access. */
6320 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6321 {
6322 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6323 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6324 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6325 {
6326 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6327
6328 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6329 return iemRaiseAlignmentCheckException(pVCpu);
6330 }
6331 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6332 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6333 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6334 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6335 * that's what FXSAVE does on a 10980xe. */
6336 && iemMemAreAlignmentChecksEnabled(pVCpu))
6337 return iemRaiseAlignmentCheckException(pVCpu);
6338 else
6339 return iemRaiseGeneralProtectionFault0(pVCpu);
6340 }
6341
6342#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6343    /* If the access is atomic there are host platform alignment restrictions
6344 we need to conform with. */
6345 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6346# if defined(RT_ARCH_AMD64)
6347 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6348# elif defined(RT_ARCH_ARM64)
6349 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6350# else
6351# error port me
6352# endif
6353 )
6354 { /* okay */ }
6355 else
6356 {
6357 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6358 pVCpu->iem.s.cMisalignedAtomics += 1;
6359 return VINF_EM_EMULATE_SPLIT_LOCK;
6360 }
6361#endif
6362 }
6363
6364#ifdef IEM_WITH_DATA_TLB
6365 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6366
6367 /*
6368 * Get the TLB entry for this page.
6369 */
6370 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6371 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6372 if (pTlbe->uTag == uTag)
6373 {
6374# ifdef VBOX_WITH_STATISTICS
6375 pVCpu->iem.s.DataTlb.cTlbHits++;
6376# endif
6377 }
6378 else
6379 {
6380 pVCpu->iem.s.DataTlb.cTlbMisses++;
6381 PGMPTWALK Walk;
6382 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6383 if (RT_FAILURE(rc))
6384 {
6385 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6386# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6387 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6388 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6389# endif
6390 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6391 }
6392
6393 Assert(Walk.fSucceeded);
6394 pTlbe->uTag = uTag;
6395 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6396 pTlbe->GCPhys = Walk.GCPhys;
6397 pTlbe->pbMappingR3 = NULL;
6398 }
6399
6400 /*
6401 * Check TLB page table level access flags.
6402 */
6403 /* If the page is either supervisor only or non-writable, we need to do
6404 more careful access checks. */
6405 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6406 {
6407 /* Write to read only memory? */
6408 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6409 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6410 && ( ( IEM_GET_CPL(pVCpu) == 3
6411 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6412 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6413 {
6414 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6415# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6416 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6417 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6418# endif
6419 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6420 }
6421
6422 /* Kernel memory accessed by userland? */
6423 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6424 && IEM_GET_CPL(pVCpu) == 3
6425 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6426 {
6427 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6428# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6429 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6430 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6431# endif
6432 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6433 }
6434 }
6435
6436 /*
6437 * Set the dirty / access flags.
6438 * ASSUMES this is set when the address is translated rather than on commit...
6439 */
6440 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6441 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6442 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6443 {
6444 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6445 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6446 AssertRC(rc2);
6447 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6448 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6449 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6450 }
6451
6452 /*
6453 * Look up the physical page info if necessary.
6454 */
6455 uint8_t *pbMem = NULL;
6456 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6457# ifdef IN_RING3
6458 pbMem = pTlbe->pbMappingR3;
6459# else
6460 pbMem = NULL;
6461# endif
6462 else
6463 {
6464 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6465 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6466 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6467 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6468 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6469 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6470 { /* likely */ }
6471 else
6472 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6473 pTlbe->pbMappingR3 = NULL;
6474 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6475 | IEMTLBE_F_NO_MAPPINGR3
6476 | IEMTLBE_F_PG_NO_READ
6477 | IEMTLBE_F_PG_NO_WRITE
6478 | IEMTLBE_F_PG_UNASSIGNED
6479 | IEMTLBE_F_PG_CODE_PAGE);
6480 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6481 &pbMem, &pTlbe->fFlagsAndPhysRev);
6482 AssertRCReturn(rc, rc);
6483# ifdef IN_RING3
6484 pTlbe->pbMappingR3 = pbMem;
6485# endif
6486 }
6487
6488 /*
6489 * Check the physical page level access and mapping.
6490 */
6491 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6492 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6493 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6494 { /* probably likely */ }
6495 else
6496 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6497 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6498 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6499 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6500 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6501 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6502
6503 if (pbMem)
6504 {
6505 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6506 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6507 fAccess |= IEM_ACCESS_NOT_LOCKED;
6508 }
6509 else
6510 {
6511 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6512 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6513 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6514 if (rcStrict != VINF_SUCCESS)
6515 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6516 }
6517
6518 void * const pvMem = pbMem;
6519
6520 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6521 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6522 if (fAccess & IEM_ACCESS_TYPE_READ)
6523 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6524
6525#else /* !IEM_WITH_DATA_TLB */
6526
6527 RTGCPHYS GCPhysFirst;
6528 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6529 if (rcStrict != VINF_SUCCESS)
6530 return rcStrict;
6531
6532 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6533 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6534 if (fAccess & IEM_ACCESS_TYPE_READ)
6535 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6536
6537 void *pvMem;
6538 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6539 if (rcStrict != VINF_SUCCESS)
6540 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6541
6542#endif /* !IEM_WITH_DATA_TLB */
6543
6544 /*
6545 * Fill in the mapping table entry.
6546 */
6547 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6548 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6549 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6550 pVCpu->iem.s.cActiveMappings += 1;
6551
6552 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6553 *ppvMem = pvMem;
6554 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6555 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6556 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6557
6558 return VINF_SUCCESS;
6559}
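
/*
 * Illustration only: the typical map/commit pattern seen from a caller of
 * iemMemMap (a sketch modelled on iemMemFetchDataU32_ZX_U64 further down;
 * the segment register and access size are arbitrary example choices):
 *
 *      uint8_t         bUnmapInfo;
 *      uint16_t const *pu16Src;
 *      VBOXSTRICTRC    rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src),
 *                                           X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint16_t const uValue = *pu16Src;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *          // ... use uValue ...
 *      }
 *
 * Whether the returned pointer refers to the real guest page or to a bounce
 * buffer is transparent to the caller; the distinction is settled at unmap time.
 */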
6560
6561
6562/**
6563 * Commits the guest memory if bounce buffered and unmaps it.
6564 *
6565 * @returns Strict VBox status code.
6566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6567 * @param bUnmapInfo Unmap info set by iemMemMap.
6568 */
6569VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6570{
6571 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6572 AssertMsgReturn( (bUnmapInfo & 0x08)
6573 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6574 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6575 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6576 VERR_NOT_FOUND);
6577
6578 /* If it's bounce buffered, we may need to write back the buffer. */
6579 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6580 {
6581 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6582 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6583 }
6584 /* Otherwise unlock it. */
6585 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6586 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6587
6588 /* Free the entry. */
6589 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6590 Assert(pVCpu->iem.s.cActiveMappings != 0);
6591 pVCpu->iem.s.cActiveMappings--;
6592 return VINF_SUCCESS;
6593}
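
/*
 * Illustration only: layout of the bUnmapInfo byte agreed on by iemMemMap and
 * the unmap routines, derived from the encode/decode expressions above:
 *
 *      bits 2:0    mapping table index         (bUnmapInfo & 0x7)
 *      bit  3      validity marker             (0x08)
 *      bits 7:4    IEM_ACCESS_TYPE_MASK bits   (bUnmapInfo >> 4, cross-checked against fAccess)
 *
 * So, roughly, a read-write mapping in slot 1 is encoded as:
 *
 *      uint8_t const bUnmapInfo = 1 | 0x08 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4);
 */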
6594
6595
6596/**
6597 * Rolls back the guest memory (conceptually only) and unmaps it.
6598 *
6599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6600 * @param bUnmapInfo Unmap info set by iemMemMap.
6601 */
6602void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6603{
6604 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6605 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6606 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6607 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6608 == ((unsigned)bUnmapInfo >> 4),
6609 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6610
6611 /* Unlock it if necessary. */
6612 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6613 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6614
6615 /* Free the entry. */
6616 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6617 Assert(pVCpu->iem.s.cActiveMappings != 0);
6618 pVCpu->iem.s.cActiveMappings--;
6619}
6620
6621#ifdef IEM_WITH_SETJMP
6622
6623/**
6624 * Maps the specified guest memory for the given kind of access, longjmp on
6625 * error.
6626 *
6627 * This may be using bounce buffering of the memory if it's crossing a page
6628 * boundary or if there is an access handler installed for any of it. Because
6629 * of lock prefix guarantees, we're in for some extra clutter when this
6630 * happens.
6631 *
6632 * This may raise a \#GP, \#SS, \#PF or \#AC.
6633 *
6634 * @returns Pointer to the mapped memory.
6635 *
6636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6637 * @param   pbUnmapInfo         Where to return unmap info to be passed to
6638 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6639 * iemMemCommitAndUnmapWoSafeJmp,
6640 * iemMemCommitAndUnmapRoSafeJmp,
6641 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6642 * when done.
6643 * @param cbMem The number of bytes to map. This is usually 1,
6644 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6645 * string operations it can be up to a page.
6646 * @param iSegReg The index of the segment register to use for
6647 * this access. The base and limits are checked.
6648 * Use UINT8_MAX to indicate that no segmentation
6649 * is required (for IDT, GDT and LDT accesses).
6650 * @param GCPtrMem The address of the guest memory.
6651 * @param fAccess How the memory is being accessed. The
6652 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6653 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6654 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6655 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6656 * set.
6657 * @param uAlignCtl Alignment control:
6658 * - Bits 15:0 is the alignment mask.
6659 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6660 * IEM_MEMMAP_F_ALIGN_SSE, and
6661 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6662 * Pass zero to skip alignment.
6663 */
6664void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6665 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6666{
6667 /*
6668 * Check the input, check segment access and adjust address
6669 * with segment base.
6670 */
6671 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6672 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6673 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6674
6675 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6676 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6677 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6678
6679 /*
6680 * Alignment check.
6681 */
6682 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6683 { /* likelyish */ }
6684 else
6685 {
6686 /* Misaligned access. */
6687 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6688 {
6689 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6690 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6691 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6692 {
6693 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6694
6695 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6696 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6697 }
6698 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6699 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6700 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6701 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6702 * that's what FXSAVE does on a 10980xe. */
6703 && iemMemAreAlignmentChecksEnabled(pVCpu))
6704 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6705 else
6706 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6707 }
6708
6709#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6710    /* If the access is atomic there are host platform alignment restrictions
6711 we need to conform with. */
6712 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6713# if defined(RT_ARCH_AMD64)
6714 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6715# elif defined(RT_ARCH_ARM64)
6716 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6717# else
6718# error port me
6719# endif
6720 )
6721 { /* okay */ }
6722 else
6723 {
6724 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6725 pVCpu->iem.s.cMisalignedAtomics += 1;
6726 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
6727 }
6728#endif
6729 }
6730
6731 /*
6732 * Figure out which mapping entry to use.
6733 */
6734 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6735 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6736 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6737 {
6738 iMemMap = iemMemMapFindFree(pVCpu);
6739 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6740 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6741 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6742 pVCpu->iem.s.aMemMappings[2].fAccess),
6743 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6744 }
6745
6746 /*
6747 * Crossing a page boundary?
6748 */
6749 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6750 { /* No (likely). */ }
6751 else
6752 {
6753 void *pvMem;
6754 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6755 if (rcStrict == VINF_SUCCESS)
6756 return pvMem;
6757 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6758 }
6759
6760#ifdef IEM_WITH_DATA_TLB
6761 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6762
6763 /*
6764 * Get the TLB entry for this page.
6765 */
6766 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6767 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6768 if (pTlbe->uTag == uTag)
6769 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6770 else
6771 {
6772 pVCpu->iem.s.DataTlb.cTlbMisses++;
6773 PGMPTWALK Walk;
6774 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6775 if (RT_FAILURE(rc))
6776 {
6777 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6778# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6779 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6780 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6781# endif
6782 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6783 }
6784
6785 Assert(Walk.fSucceeded);
6786 pTlbe->uTag = uTag;
6787 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6788 pTlbe->GCPhys = Walk.GCPhys;
6789 pTlbe->pbMappingR3 = NULL;
6790 }
6791
6792 /*
6793 * Check the flags and physical revision.
6794 */
6795 /** @todo make the caller pass these in with fAccess. */
6796 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6797 ? IEMTLBE_F_PT_NO_USER : 0;
6798 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6799 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6800 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6801 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6802 ? IEMTLBE_F_PT_NO_WRITE : 0)
6803 : 0;
6804 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6805 uint8_t *pbMem = NULL;
6806 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6807 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6808# ifdef IN_RING3
6809 pbMem = pTlbe->pbMappingR3;
6810# else
6811 pbMem = NULL;
6812# endif
6813 else
6814 {
6815 /*
6816 * Okay, something isn't quite right or needs refreshing.
6817 */
6818 /* Write to read only memory? */
6819 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6820 {
6821 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6822# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6823 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6824 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6825# endif
6826 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6827 }
6828
6829 /* Kernel memory accessed by userland? */
6830 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6831 {
6832 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6833# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6834 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6835 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6836# endif
6837 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6838 }
6839
6840 /* Set the dirty / access flags.
6841 ASSUMES this is set when the address is translated rather than on commit... */
6842 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6843 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6844 {
6845 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6846 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6847 AssertRC(rc2);
6848 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6849 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6850 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6851 }
6852
6853 /*
6854 * Check if the physical page info needs updating.
6855 */
6856 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6857# ifdef IN_RING3
6858 pbMem = pTlbe->pbMappingR3;
6859# else
6860 pbMem = NULL;
6861# endif
6862 else
6863 {
6864 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6865 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6866 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6867 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6868 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6869 pTlbe->pbMappingR3 = NULL;
6870 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6871 | IEMTLBE_F_NO_MAPPINGR3
6872 | IEMTLBE_F_PG_NO_READ
6873 | IEMTLBE_F_PG_NO_WRITE
6874 | IEMTLBE_F_PG_UNASSIGNED
6875 | IEMTLBE_F_PG_CODE_PAGE);
6876 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6877 &pbMem, &pTlbe->fFlagsAndPhysRev);
6878 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6879# ifdef IN_RING3
6880 pTlbe->pbMappingR3 = pbMem;
6881# endif
6882 }
6883
6884 /*
6885 * Check the physical page level access and mapping.
6886 */
6887 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6888 { /* probably likely */ }
6889 else
6890 {
6891 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6892 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6893 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6894 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6895 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6896 if (rcStrict == VINF_SUCCESS)
6897 return pbMem;
6898 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6899 }
6900 }
6901 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6902
6903 if (pbMem)
6904 {
6905 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6906 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6907 fAccess |= IEM_ACCESS_NOT_LOCKED;
6908 }
6909 else
6910 {
6911 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6912 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6913 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6914 if (rcStrict == VINF_SUCCESS)
6915 {
6916 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6917 return pbMem;
6918 }
6919 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6920 }
6921
6922 void * const pvMem = pbMem;
6923
6924 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6925 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6926 if (fAccess & IEM_ACCESS_TYPE_READ)
6927 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6928
6929#else /* !IEM_WITH_DATA_TLB */
6930
6931
6932 RTGCPHYS GCPhysFirst;
6933 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6934 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6935 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6936
6937 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6938 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6939 if (fAccess & IEM_ACCESS_TYPE_READ)
6940 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6941
6942 void *pvMem;
6943 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6944 if (rcStrict == VINF_SUCCESS)
6945 { /* likely */ }
6946 else
6947 {
6948 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6949 if (rcStrict == VINF_SUCCESS)
6950 return pvMem;
6951 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6952 }
6953
6954#endif /* !IEM_WITH_DATA_TLB */
6955
6956 /*
6957 * Fill in the mapping table entry.
6958 */
6959 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6960 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6961 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6962 pVCpu->iem.s.cActiveMappings++;
6963
6964 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6965
6966 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6967 return pvMem;
6968}
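
/*
 * Illustration only: the longjmp flavour of the map/commit pattern (a sketch
 * in the style of the IEMAllMemRWTmpl instantiations further down, not an
 * exact copy of any of them; segment and size are example choices):
 *
 *      uint8_t         bUnmapInfo;
 *      uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu32Src),
 *                                                               X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R,
 *                                                               sizeof(*pu32Src) - 1);
 *      uint32_t const  uValue  = *pu32Src;
 *      iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 *
 * There is no status code to check on this path: any failure longjmps out via
 * IEM_DO_LONGJMP before a pointer is returned.
 */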
6969
6970
6971/**
6972 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6973 *
6974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6975 * @param   bUnmapInfo          Unmap info set by iemMemMapJmp.
6977 */
6978void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6979{
6980 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6981 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6982 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6983 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6984 == ((unsigned)bUnmapInfo >> 4),
6985 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6986
6987 /* If it's bounce buffered, we may need to write back the buffer. */
6988 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6989 {
6990 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6991 {
6992 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6993 if (rcStrict == VINF_SUCCESS)
6994 return;
6995 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6996 }
6997 }
6998 /* Otherwise unlock it. */
6999 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7000 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7001
7002 /* Free the entry. */
7003 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7004 Assert(pVCpu->iem.s.cActiveMappings != 0);
7005 pVCpu->iem.s.cActiveMappings--;
7006}
7007
7008
7009/** Fallback for iemMemCommitAndUnmapRwJmp. */
7010void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7011{
7012 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7013 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7014}
7015
7016
7017/** Fallback for iemMemCommitAndUnmapAtJmp. */
7018void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7019{
7020 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7021 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7022}
7023
7024
7025/** Fallback for iemMemCommitAndUnmapWoJmp. */
7026void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7027{
7028 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7029 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7030}
7031
7032
7033/** Fallback for iemMemCommitAndUnmapRoJmp. */
7034void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7035{
7036 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7037 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7038}
7039
7040
7041/** Fallback for iemMemRollbackAndUnmapWo. */
7042void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7043{
7044 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7045 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7046}
7047
7048#endif /* IEM_WITH_SETJMP */
7049
7050#ifndef IN_RING3
7051/**
7052 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7053 * buffer part runs into trouble, the write is postponed to ring-3 (VMCPU_FF_IEM is set).
7054 *
7055 * Allows the instruction to be completed and retired, while the IEM user will
7056 * return to ring-3 immediately afterwards and do the postponed writes there.
7057 *
7058 * @returns VBox status code (no strict statuses). Caller must check
7059 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7061 * @param   bUnmapInfo          Unmap info set by iemMemMap.
7063 */
7064VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7065{
7066 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7067 AssertMsgReturn( (bUnmapInfo & 0x08)
7068 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7069 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7070 == ((unsigned)bUnmapInfo >> 4),
7071 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7072 VERR_NOT_FOUND);
7073
7074 /* If it's bounce buffered, we may need to write back the buffer. */
7075 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7076 {
7077 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7078 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7079 }
7080 /* Otherwise unlock it. */
7081 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7082 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7083
7084 /* Free the entry. */
7085 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7086 Assert(pVCpu->iem.s.cActiveMappings != 0);
7087 pVCpu->iem.s.cActiveMappings--;
7088 return VINF_SUCCESS;
7089}
7090#endif
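
/*
 * Illustration only: the caller pattern hinted at above for a non-ring-3
 * string instruction loop (the surrounding loop itself is hypothetical):
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
 *      if (rcStrict == VINF_SUCCESS && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          continue;   // safe to retire this iteration and keep going
 *      break;          // return to ring-3 so the postponed write can be flushed
 */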
7091
7092
7093/**
7094 * Rollbacks mappings, releasing page locks and such.
7095 *
7096 * The caller shall only call this after checking cActiveMappings.
7097 *
7098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7099 */
7100void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7101{
7102 Assert(pVCpu->iem.s.cActiveMappings > 0);
7103
7104 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7105 while (iMemMap-- > 0)
7106 {
7107 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7108 if (fAccess != IEM_ACCESS_INVALID)
7109 {
7110 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7111 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7112 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7113 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7114 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7115 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7116 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7117 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7118 pVCpu->iem.s.cActiveMappings--;
7119 }
7120 }
7121}
7122
7123
7124/*
7125 * Instantiate R/W templates.
7126 */
7127#define TMPL_MEM_WITH_STACK
7128
7129#define TMPL_MEM_TYPE uint8_t
7130#define TMPL_MEM_FN_SUFF U8
7131#define TMPL_MEM_FMT_TYPE "%#04x"
7132#define TMPL_MEM_FMT_DESC "byte"
7133#include "IEMAllMemRWTmpl.cpp.h"
7134
7135#define TMPL_MEM_TYPE uint16_t
7136#define TMPL_MEM_FN_SUFF U16
7137#define TMPL_MEM_FMT_TYPE "%#06x"
7138#define TMPL_MEM_FMT_DESC "word"
7139#include "IEMAllMemRWTmpl.cpp.h"
7140
7141#define TMPL_WITH_PUSH_SREG
7142#define TMPL_MEM_TYPE uint32_t
7143#define TMPL_MEM_FN_SUFF U32
7144#define TMPL_MEM_FMT_TYPE "%#010x"
7145#define TMPL_MEM_FMT_DESC "dword"
7146#include "IEMAllMemRWTmpl.cpp.h"
7147#undef TMPL_WITH_PUSH_SREG
7148
7149#define TMPL_MEM_TYPE uint64_t
7150#define TMPL_MEM_FN_SUFF U64
7151#define TMPL_MEM_FMT_TYPE "%#018RX64"
7152#define TMPL_MEM_FMT_DESC "qword"
7153#include "IEMAllMemRWTmpl.cpp.h"
7154
7155#undef TMPL_MEM_WITH_STACK
7156
7157#define TMPL_MEM_TYPE uint64_t
7158#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7159#define TMPL_MEM_FN_SUFF U64AlignedU128
7160#define TMPL_MEM_FMT_TYPE "%#018RX64"
7161#define TMPL_MEM_FMT_DESC "qword"
7162#include "IEMAllMemRWTmpl.cpp.h"
7163
7164/* See IEMAllMemRWTmplInline.cpp.h */
7165#define TMPL_MEM_BY_REF
7166
7167#define TMPL_MEM_TYPE RTFLOAT80U
7168#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7169#define TMPL_MEM_FN_SUFF R80
7170#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7171#define TMPL_MEM_FMT_DESC "tword"
7172#include "IEMAllMemRWTmpl.cpp.h"
7173
7174#define TMPL_MEM_TYPE RTPBCD80U
7175#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7176#define TMPL_MEM_FN_SUFF D80
7177#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7178#define TMPL_MEM_FMT_DESC "tword"
7179#include "IEMAllMemRWTmpl.cpp.h"
7180
7181#define TMPL_MEM_TYPE RTUINT128U
7182#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7183#define TMPL_MEM_FN_SUFF U128
7184#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7185#define TMPL_MEM_FMT_DESC "dqword"
7186#include "IEMAllMemRWTmpl.cpp.h"
7187
7188#define TMPL_MEM_TYPE RTUINT128U
7189#define TMPL_MEM_TYPE_ALIGN 0
7190#define TMPL_MEM_FN_SUFF U128NoAc
7191#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7192#define TMPL_MEM_FMT_DESC "dqword"
7193#include "IEMAllMemRWTmpl.cpp.h"
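
/*
 * Illustration only: the instantiation protocol used above.  Each block defines
 * the TMPL_MEM_* macros and includes IEMAllMemRWTmpl.cpp.h, which emits the
 * typed fetch/store helpers (plus stack push/pop ones when TMPL_MEM_WITH_STACK
 * is defined).  A hypothetical further instantiation would follow the same
 * pattern:
 *
 *      #define TMPL_MEM_TYPE       RTUINT256U
 *      #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
 *      #define TMPL_MEM_FN_SUFF    U256
 *      #define TMPL_MEM_FMT_TYPE   "%.32Rhxs"
 *      #define TMPL_MEM_FMT_DESC   "qqword"
 *      #include "IEMAllMemRWTmpl.cpp.h"
 *
 * Whether such a U256 block exists elsewhere in the file is not shown here;
 * the snippet only demonstrates the macro contract.
 */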
7194
7195/**
7196 * Fetches a data dword and zero extends it to a qword.
7197 *
7198 * @returns Strict VBox status code.
7199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7200 * @param pu64Dst Where to return the qword.
7201 * @param iSegReg The index of the segment register to use for
7202 * this access. The base and limits are checked.
7203 * @param GCPtrMem The address of the guest memory.
7204 */
7205VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7206{
7207 /* The lazy approach for now... */
7208 uint8_t bUnmapInfo;
7209 uint32_t const *pu32Src;
7210 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7211 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7212 if (rc == VINF_SUCCESS)
7213 {
7214 *pu64Dst = *pu32Src;
7215 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7216 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7217 }
7218 return rc;
7219}
7220
7221
7222#ifdef SOME_UNUSED_FUNCTION
7223/**
7224 * Fetches a data dword and sign extends it to a qword.
7225 *
7226 * @returns Strict VBox status code.
7227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7228 * @param pu64Dst Where to return the sign extended value.
7229 * @param iSegReg The index of the segment register to use for
7230 * this access. The base and limits are checked.
7231 * @param GCPtrMem The address of the guest memory.
7232 */
7233VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7234{
7235 /* The lazy approach for now... */
7236 uint8_t bUnmapInfo;
7237 int32_t const *pi32Src;
7238 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7239 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7240 if (rc == VINF_SUCCESS)
7241 {
7242 *pu64Dst = *pi32Src;
7243 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7244 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7245 }
7246#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7247 else
7248 *pu64Dst = 0;
7249#endif
7250 return rc;
7251}
7252#endif
7253
7254
7255/**
7256 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7257 * related.
7258 *
7259 * Raises \#GP(0) if not aligned.
7260 *
7261 * @returns Strict VBox status code.
7262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7263 * @param   pu128Dst            Where to return the dqword.
7264 * @param iSegReg The index of the segment register to use for
7265 * this access. The base and limits are checked.
7266 * @param GCPtrMem The address of the guest memory.
7267 */
7268VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7269{
7270 /* The lazy approach for now... */
7271 uint8_t bUnmapInfo;
7272 PCRTUINT128U pu128Src;
7273 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7274 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7275 if (rc == VINF_SUCCESS)
7276 {
7277 pu128Dst->au64[0] = pu128Src->au64[0];
7278 pu128Dst->au64[1] = pu128Src->au64[1];
7279 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7280 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7281 }
7282 return rc;
7283}
7284
7285
7286#ifdef IEM_WITH_SETJMP
7287/**
7288 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7289 * related, longjmp on error.
7290 *
7291 * Raises \#GP(0) if not aligned.
7292 *
7293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7294 * @param   pu128Dst            Where to return the dqword.
7295 * @param iSegReg The index of the segment register to use for
7296 * this access. The base and limits are checked.
7297 * @param GCPtrMem The address of the guest memory.
7298 */
7299void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7300 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7301{
7302 /* The lazy approach for now... */
7303 uint8_t bUnmapInfo;
7304 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7305 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7306 pu128Dst->au64[0] = pu128Src->au64[0];
7307 pu128Dst->au64[1] = pu128Src->au64[1];
7308 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7309 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7310}
7311#endif
7312
7313
7314/**
7315 * Fetches a data oword (octo word), generally AVX related.
7316 *
7317 * @returns Strict VBox status code.
7318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7319 * @param   pu256Dst            Where to return the qqword.
7320 * @param iSegReg The index of the segment register to use for
7321 * this access. The base and limits are checked.
7322 * @param GCPtrMem The address of the guest memory.
7323 */
7324VBOXSTRICTRC iemMemFetchDataU256NoAc(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7325{
7326 /* The lazy approach for now... */
7327 uint8_t bUnmapInfo;
7328 PCRTUINT256U pu256Src;
7329 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7330 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7331 if (rc == VINF_SUCCESS)
7332 {
7333 pu256Dst->au64[0] = pu256Src->au64[0];
7334 pu256Dst->au64[1] = pu256Src->au64[1];
7335 pu256Dst->au64[2] = pu256Src->au64[2];
7336 pu256Dst->au64[3] = pu256Src->au64[3];
7337 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7338 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7339 }
7340 return rc;
7341}
7342
7343
7344#ifdef IEM_WITH_SETJMP
7345/**
7346 * Fetches a data oword (octo word), generally AVX related.
7347 *
7348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7349 * @param pu256Dst Where to return the qqword.
7350 * @param iSegReg The index of the segment register to use for
7351 * this access. The base and limits are checked.
7352 * @param GCPtrMem The address of the guest memory.
7353 */
7354void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7355{
7356 /* The lazy approach for now... */
7357 uint8_t bUnmapInfo;
7358 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7359 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7360 pu256Dst->au64[0] = pu256Src->au64[0];
7361 pu256Dst->au64[1] = pu256Src->au64[1];
7362 pu256Dst->au64[2] = pu256Src->au64[2];
7363 pu256Dst->au64[3] = pu256Src->au64[3];
7364 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7365 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7366}
7367#endif
7368
7369
7370/**
7371 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7372 * related.
7373 *
7374 * Raises \#GP(0) if not aligned.
7375 *
7376 * @returns Strict VBox status code.
7377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7378 * @param pu256Dst Where to return the qqword.
7379 * @param iSegReg The index of the segment register to use for
7380 * this access. The base and limits are checked.
7381 * @param GCPtrMem The address of the guest memory.
7382 */
7383VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7384{
7385 /* The lazy approach for now... */
7386 uint8_t bUnmapInfo;
7387 PCRTUINT256U pu256Src;
7388 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7389 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7390 if (rc == VINF_SUCCESS)
7391 {
7392 pu256Dst->au64[0] = pu256Src->au64[0];
7393 pu256Dst->au64[1] = pu256Src->au64[1];
7394 pu256Dst->au64[2] = pu256Src->au64[2];
7395 pu256Dst->au64[3] = pu256Src->au64[3];
7396 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7397 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7398 }
7399 return rc;
7400}
7401
7402
7403#ifdef IEM_WITH_SETJMP
7404/**
7405 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7406 * related, longjmp on error.
7407 *
7408 * Raises \#GP(0) if not aligned.
7409 *
7410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7411 * @param pu256Dst Where to return the qqword.
7412 * @param iSegReg The index of the segment register to use for
7413 * this access. The base and limits are checked.
7414 * @param GCPtrMem The address of the guest memory.
7415 */
7416void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7417 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7418{
7419 /* The lazy approach for now... */
7420 uint8_t bUnmapInfo;
7421 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7422 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7423 pu256Dst->au64[0] = pu256Src->au64[0];
7424 pu256Dst->au64[1] = pu256Src->au64[1];
7425 pu256Dst->au64[2] = pu256Src->au64[2];
7426 pu256Dst->au64[3] = pu256Src->au64[3];
7427 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7428 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7429}
7430#endif
7431
7432
7433
7434/**
7435 * Fetches a descriptor register (lgdt, lidt).
7436 *
7437 * @returns Strict VBox status code.
7438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7439 * @param pcbLimit Where to return the limit.
7440 * @param pGCPtrBase Where to return the base.
7441 * @param iSegReg The index of the segment register to use for
7442 * this access. The base and limits are checked.
7443 * @param GCPtrMem The address of the guest memory.
7444 * @param enmOpSize The effective operand size.
7445 */
7446VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7447 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7448{
7449 /*
7450 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7451 * little special:
7452 * - The two reads are done separately.
7453 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7454 * - We suspect the 386 to actually commit the limit before the base in
7455 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7456 * don't try to emulate this eccentric behavior, because it's not well
7457 * enough understood and rather hard to trigger.
7458 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7459 */
7460 VBOXSTRICTRC rcStrict;
7461 if (IEM_IS_64BIT_CODE(pVCpu))
7462 {
7463 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7464 if (rcStrict == VINF_SUCCESS)
7465 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7466 }
7467 else
7468 {
7469 uint32_t uTmp = 0; /* (Silences a Visual C++ maybe-used-uninitialized warning.) */
7470 if (enmOpSize == IEMMODE_32BIT)
7471 {
7472 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7473 {
7474 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7475 if (rcStrict == VINF_SUCCESS)
7476 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7477 }
7478 else
7479 {
7480 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7481 if (rcStrict == VINF_SUCCESS)
7482 {
7483 *pcbLimit = (uint16_t)uTmp;
7484 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7485 }
7486 }
7487 if (rcStrict == VINF_SUCCESS)
7488 *pGCPtrBase = uTmp;
7489 }
7490 else
7491 {
7492 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7493 if (rcStrict == VINF_SUCCESS)
7494 {
7495 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7496 if (rcStrict == VINF_SUCCESS)
7497 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7498 }
7499 }
7500 }
7501 return rcStrict;
7502}
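
/* Worked example (illustrative, values chosen for this note): a 16-bit
 * operand-size LGDT in legacy mode with the 6-byte descriptor
 * { limit = 0x03ff, base dword = 0x12345678 } at GCPtrMem takes the last
 * branch above: the limit is read as a word and the base as a dword at +2,
 * giving *pcbLimit = 0x03ff and *pGCPtrBase = 0x12345678 & 0x00ffffff
 * = 0x00345678, i.e. only the low 24 bits of the base are kept. */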
7503
7504
7505/**
7506 * Stores a data dqword, SSE aligned.
7507 *
7508 * @returns Strict VBox status code.
7509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7510 * @param iSegReg The index of the segment register to use for
7511 * this access. The base and limits are checked.
7512 * @param GCPtrMem The address of the guest memory.
7513 * @param u128Value The value to store.
7514 */
7515VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7516{
7517 /* The lazy approach for now... */
7518 uint8_t bUnmapInfo;
7519 PRTUINT128U pu128Dst;
7520 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7521 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7522 if (rc == VINF_SUCCESS)
7523 {
7524 pu128Dst->au64[0] = u128Value.au64[0];
7525 pu128Dst->au64[1] = u128Value.au64[1];
7526 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7527 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7528 }
7529 return rc;
7530}
7531
7532
7533#ifdef IEM_WITH_SETJMP
7534/**
7535 * Stores a data dqword, SSE aligned, longjmp on error.
7536 *
7538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7539 * @param iSegReg The index of the segment register to use for
7540 * this access. The base and limits are checked.
7541 * @param GCPtrMem The address of the guest memory.
7542 * @param u128Value The value to store.
7543 */
7544void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7545 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7546{
7547 /* The lazy approach for now... */
7548 uint8_t bUnmapInfo;
7549 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7550 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7551 pu128Dst->au64[0] = u128Value.au64[0];
7552 pu128Dst->au64[1] = u128Value.au64[1];
7553 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7554 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7555}
7556#endif
7557
7558
7559/**
7560 * Stores a data qqword.
7561 *
7562 * @returns Strict VBox status code.
7563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7564 * @param iSegReg The index of the segment register to use for
7565 * this access. The base and limits are checked.
7566 * @param GCPtrMem The address of the guest memory.
7567 * @param pu256Value Pointer to the value to store.
7568 */
7569VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7570{
7571 /* The lazy approach for now... */
7572 uint8_t bUnmapInfo;
7573 PRTUINT256U pu256Dst;
7574 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7575 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7576 if (rc == VINF_SUCCESS)
7577 {
7578 pu256Dst->au64[0] = pu256Value->au64[0];
7579 pu256Dst->au64[1] = pu256Value->au64[1];
7580 pu256Dst->au64[2] = pu256Value->au64[2];
7581 pu256Dst->au64[3] = pu256Value->au64[3];
7582 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7583 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7584 }
7585 return rc;
7586}
7587
7588
7589#ifdef IEM_WITH_SETJMP
7590/**
7591 * Stores a data qqword, longjmp on error.
7592 *
7593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7594 * @param iSegReg The index of the segment register to use for
7595 * this access. The base and limits are checked.
7596 * @param GCPtrMem The address of the guest memory.
7597 * @param pu256Value Pointer to the value to store.
7598 */
7599void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7600{
7601 /* The lazy approach for now... */
7602 uint8_t bUnmapInfo;
7603 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7604 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7605 pu256Dst->au64[0] = pu256Value->au64[0];
7606 pu256Dst->au64[1] = pu256Value->au64[1];
7607 pu256Dst->au64[2] = pu256Value->au64[2];
7608 pu256Dst->au64[3] = pu256Value->au64[3];
7609 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7610 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7611}
7612#endif
7613
7614
7615/**
7616 * Stores a data qqword.
7617 *
7618 * @returns Strict VBox status code.
7619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7620 * @param iSegReg The index of the segment register to use for
7621 * this access. The base and limits are checked.
7622 * @param GCPtrMem The address of the guest memory.
7623 * @param pu256Value Pointer to the value to store.
7624 */
7625VBOXSTRICTRC iemMemStoreDataU256NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7626{
7627 /* The lazy approach for now... */
7628 uint8_t bUnmapInfo;
7629 PRTUINT256U pu256Dst;
7630 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7631 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7632 if (rc == VINF_SUCCESS)
7633 {
7634 pu256Dst->au64[0] = pu256Value->au64[0];
7635 pu256Dst->au64[1] = pu256Value->au64[1];
7636 pu256Dst->au64[2] = pu256Value->au64[2];
7637 pu256Dst->au64[3] = pu256Value->au64[3];
7638 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7639 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7640 }
7641 return rc;
7642}
7643
7644
7645#ifdef IEM_WITH_SETJMP
7646/**
7647 * Stores a data qqword, longjmp on error.
7648 *
7649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7650 * @param iSegReg The index of the segment register to use for
7651 * this access. The base and limits are checked.
7652 * @param GCPtrMem The address of the guest memory.
7653 * @param pu256Value Pointer to the value to store.
7654 */
7655void iemMemStoreDataU256NoAcJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7656{
7657 /* The lazy approach for now... */
7658 uint8_t bUnmapInfo;
7659 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7660 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7661 pu256Dst->au64[0] = pu256Value->au64[0];
7662 pu256Dst->au64[1] = pu256Value->au64[1];
7663 pu256Dst->au64[2] = pu256Value->au64[2];
7664 pu256Dst->au64[3] = pu256Value->au64[3];
7665 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7666 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7667}
7668#endif
7669
7670
7671/**
7672 * Stores a data qqword, AVX aligned, raising \#GP(0) if misaligned.
7673 *
7674 * @returns Strict VBox status code.
7675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7676 * @param iSegReg The index of the segment register to use for
7677 * this access. The base and limits are checked.
7678 * @param GCPtrMem The address of the guest memory.
7679 * @param pu256Value Pointer to the value to store.
7680 */
7681VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7682{
7683 /* The lazy approach for now... */
7684 uint8_t bUnmapInfo;
7685 PRTUINT256U pu256Dst;
7686 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7687 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7688 if (rc == VINF_SUCCESS)
7689 {
7690 pu256Dst->au64[0] = pu256Value->au64[0];
7691 pu256Dst->au64[1] = pu256Value->au64[1];
7692 pu256Dst->au64[2] = pu256Value->au64[2];
7693 pu256Dst->au64[3] = pu256Value->au64[3];
7694 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7695 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7696 }
7697 return rc;
7698}
7699
7700
7701#ifdef IEM_WITH_SETJMP
7702/**
7703 * Stores a data qqword, AVX aligned, longjmp on error.
7704 *
7706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7707 * @param iSegReg The index of the segment register to use for
7708 * this access. The base and limits are checked.
7709 * @param GCPtrMem The address of the guest memory.
7710 * @param pu256Value Pointer to the value to store.
7711 */
7712void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7713 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7714{
7715 /* The lazy approach for now... */
7716 uint8_t bUnmapInfo;
7717 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7718 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7719 pu256Dst->au64[0] = pu256Value->au64[0];
7720 pu256Dst->au64[1] = pu256Value->au64[1];
7721 pu256Dst->au64[2] = pu256Value->au64[2];
7722 pu256Dst->au64[3] = pu256Value->au64[3];
7723 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7724 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7725}
7726#endif
7727
7728
7729/**
7730 * Stores a descriptor register (sgdt, sidt).
7731 *
7732 * @returns Strict VBox status code.
7733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7734 * @param cbLimit The limit.
7735 * @param GCPtrBase The base address.
7736 * @param iSegReg The index of the segment register to use for
7737 * this access. The base and limits are checked.
7738 * @param GCPtrMem The address of the guest memory.
7739 */
7740VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7741{
7742 /*
7743 * The SIDT and SGDT instructions actually store the data using two
7744 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7745 * do not respond to opsize prefixes.
7746 */
7747 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7748 if (rcStrict == VINF_SUCCESS)
7749 {
7750 if (IEM_IS_16BIT_CODE(pVCpu))
7751 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7752 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7753 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7754 else if (IEM_IS_32BIT_CODE(pVCpu))
7755 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7756 else
7757 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7758 }
7759 return rcStrict;
7760}
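
/* Worked example (illustrative, values chosen for this note): SGDT executed in
 * 16-bit code with cbLimit = 0x03ff and GCPtrBase = 0x00345678 stores the word
 * 0x03ff at GCPtrMem and a dword at GCPtrMem + 2.  On a 286-class target CPU
 * the code above forces the top byte of that dword to 0xff (0xff345678), while
 * on later CPUs the full 32-bit base (0x00345678) is written; the amount of
 * data stored never depends on the operand size. */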
7761
7762
7763/**
7764 * Begin a special stack push (used by interrupts, exceptions and such).
7765 *
7766 * This will raise \#SS or \#PF if appropriate.
7767 *
7768 * @returns Strict VBox status code.
7769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7770 * @param cbMem The number of bytes to push onto the stack.
7771 * @param cbAlign The alignment mask (7, 3, 1).
7772 * @param ppvMem Where to return the pointer to the stack memory.
7773 * As with the other memory functions this could be
7774 * direct access or bounce buffered access, so
7775 * don't commit any register state until the commit call
7776 * succeeds.
7777 * @param pbUnmapInfo Where to store unmap info for
7778 * iemMemStackPushCommitSpecial.
7779 * @param puNewRsp Where to return the new RSP value. This must be
7780 * passed unchanged to
7781 * iemMemStackPushCommitSpecial().
7782 */
7783VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7784 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7785{
7786 Assert(cbMem < UINT8_MAX);
7787 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7788 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7789}
7790
7791
7792/**
7793 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7794 *
7795 * This will update the rSP.
7796 *
7797 * @returns Strict VBox status code.
7798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7799 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7800 * @param uNewRsp The new RSP value returned by
7801 * iemMemStackPushBeginSpecial().
7802 */
7803VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7804{
7805 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7806 if (rcStrict == VINF_SUCCESS)
7807 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7808 return rcStrict;
7809}
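
/* Usage sketch (illustrative only, hypothetical local names): an exception
 * dispatcher pushing two dwords in 32-bit code would use the begin/commit
 * pair roughly like this:
 *
 *     uint32_t    *pau32Frame;
 *     uint8_t      bUnmapInfo;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 3, (void **)&pau32Frame, &bUnmapInfo, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pau32Frame[0] = uErrCode;   // hypothetical values to push
 *     pau32Frame[1] = uOldEip;
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
 *
 * RSP (and any other register state) is only updated once the commit succeeds. */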
7810
7811
7812/**
7813 * Begin a special stack pop (used by iret, retf and such).
7814 *
7815 * This will raise \#SS or \#PF if appropriate.
7816 *
7817 * @returns Strict VBox status code.
7818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7819 * @param cbMem The number of bytes to pop from the stack.
7820 * @param cbAlign The alignment mask (7, 3, 1).
7821 * @param ppvMem Where to return the pointer to the stack memory.
7822 * @param pbUnmapInfo Where to store unmap info for
7823 * iemMemStackPopDoneSpecial.
7824 * @param puNewRsp Where to return the new RSP value. This must be
7825 * assigned to CPUMCTX::rsp manually some time
7826 * after iemMemStackPopDoneSpecial() has been
7827 * called.
7828 */
7829VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7830 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7831{
7832 Assert(cbMem < UINT8_MAX);
7833 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7834 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7835}
7836
7837
7838/**
7839 * Continue a special stack pop (used by iret and retf), for the purpose of
7840 * retrieving a new stack pointer.
7841 *
7842 * This will raise \#SS or \#PF if appropriate.
7843 *
7844 * @returns Strict VBox status code.
7845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7846 * @param off Offset from the top of the stack. This is zero
7847 * except in the retf case.
7848 * @param cbMem The number of bytes to pop from the stack.
7849 * @param ppvMem Where to return the pointer to the stack memory.
7850 * @param pbUnmapInfo Where to store unmap info for
7851 * iemMemStackPopDoneSpecial.
7852 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7853 * return this because all use of this function is
7854 * to retrieve a new value and anything we return
7855 * here would be discarded.)
7856 */
7857VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7858 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7859{
7860 Assert(cbMem < UINT8_MAX);
7861
7862 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7863 RTGCPTR GCPtrTop;
7864 if (IEM_IS_64BIT_CODE(pVCpu))
7865 GCPtrTop = uCurNewRsp;
7866 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7867 GCPtrTop = (uint32_t)uCurNewRsp;
7868 else
7869 GCPtrTop = (uint16_t)uCurNewRsp;
7870
7871 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7872 0 /* checked in iemMemStackPopBeginSpecial */);
7873}
7874
7875
7876/**
7877 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7878 * iemMemStackPopContinueSpecial).
7879 *
7880 * The caller will manually commit the rSP.
7881 *
7882 * @returns Strict VBox status code.
7883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7884 * @param bUnmapInfo Unmap information returned by
7885 * iemMemStackPopBeginSpecial() or
7886 * iemMemStackPopContinueSpecial().
7887 */
7888VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7889{
7890 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7891}
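
/* Usage sketch (illustrative only, hypothetical local names): a 32-bit far
 * return reading EIP and CS from the stack would pair the calls like this:
 *
 *     uint32_t const *pau32Frame;
 *     uint8_t         bUnmapInfo;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 3, (void const **)&pau32Frame, &bUnmapInfo, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint32_t const uNewEip = pau32Frame[0];
 *     uint16_t const uNewCs  = (uint16_t)pau32Frame[1];
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *
 * Only after the done call (and any further checks) succeed does the caller
 * assign uNewRsp to pVCpu->cpum.GstCtx.rsp. */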
7892
7893
7894/**
7895 * Fetches a system table byte.
7896 *
7897 * @returns Strict VBox status code.
7898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7899 * @param pbDst Where to return the byte.
7900 * @param iSegReg The index of the segment register to use for
7901 * this access. The base and limits are checked.
7902 * @param GCPtrMem The address of the guest memory.
7903 */
7904VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7905{
7906 /* The lazy approach for now... */
7907 uint8_t bUnmapInfo;
7908 uint8_t const *pbSrc;
7909 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7910 if (rc == VINF_SUCCESS)
7911 {
7912 *pbDst = *pbSrc;
7913 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7914 }
7915 return rc;
7916}
7917
7918
7919/**
7920 * Fetches a system table word.
7921 *
7922 * @returns Strict VBox status code.
7923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7924 * @param pu16Dst Where to return the word.
7925 * @param iSegReg The index of the segment register to use for
7926 * this access. The base and limits are checked.
7927 * @param GCPtrMem The address of the guest memory.
7928 */
7929VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7930{
7931 /* The lazy approach for now... */
7932 uint8_t bUnmapInfo;
7933 uint16_t const *pu16Src;
7934 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7935 if (rc == VINF_SUCCESS)
7936 {
7937 *pu16Dst = *pu16Src;
7938 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7939 }
7940 return rc;
7941}
7942
7943
7944/**
7945 * Fetches a system table dword.
7946 *
7947 * @returns Strict VBox status code.
7948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7949 * @param pu32Dst Where to return the dword.
7950 * @param iSegReg The index of the segment register to use for
7951 * this access. The base and limits are checked.
7952 * @param GCPtrMem The address of the guest memory.
7953 */
7954VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7955{
7956 /* The lazy approach for now... */
7957 uint8_t bUnmapInfo;
7958 uint32_t const *pu32Src;
7959 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7960 if (rc == VINF_SUCCESS)
7961 {
7962 *pu32Dst = *pu32Src;
7963 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7964 }
7965 return rc;
7966}
7967
7968
7969/**
7970 * Fetches a system table qword.
7971 *
7972 * @returns Strict VBox status code.
7973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7974 * @param pu64Dst Where to return the qword.
7975 * @param iSegReg The index of the segment register to use for
7976 * this access. The base and limits are checked.
7977 * @param GCPtrMem The address of the guest memory.
7978 */
7979VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7980{
7981 /* The lazy approach for now... */
7982 uint8_t bUnmapInfo;
7983 uint64_t const *pu64Src;
7984 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7985 if (rc == VINF_SUCCESS)
7986 {
7987 *pu64Dst = *pu64Src;
7988 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7989 }
7990 return rc;
7991}
7992
7993
7994/**
7995 * Fetches a descriptor table entry with caller specified error code.
7996 *
7997 * @returns Strict VBox status code.
7998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7999 * @param pDesc Where to return the descriptor table entry.
8000 * @param uSel The selector which table entry to fetch.
8001 * @param uXcpt The exception to raise on table lookup error.
8002 * @param uErrorCode The error code associated with the exception.
8003 */
8004static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8005 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8006{
8007 AssertPtr(pDesc);
8008 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8009
8010 /** @todo did the 286 require all 8 bytes to be accessible? */
8011 /*
8012 * Get the selector table base and check bounds.
8013 */
8014 RTGCPTR GCPtrBase;
8015 if (uSel & X86_SEL_LDT)
8016 {
8017 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8018 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8019 {
8020 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8021 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8022 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8023 uErrorCode, 0);
8024 }
8025
8026 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8027 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8028 }
8029 else
8030 {
8031 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8032 {
8033 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8034 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8035 uErrorCode, 0);
8036 }
8037 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8038 }
8039
8040 /*
8041 * Read the legacy descriptor and maybe the long mode extensions if
8042 * required.
8043 */
8044 VBOXSTRICTRC rcStrict;
8045 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8046 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8047 else
8048 {
8049 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8050 if (rcStrict == VINF_SUCCESS)
8051 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8052 if (rcStrict == VINF_SUCCESS)
8053 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8054 if (rcStrict == VINF_SUCCESS)
8055 pDesc->Legacy.au16[3] = 0;
8056 else
8057 return rcStrict;
8058 }
8059
8060 if (rcStrict == VINF_SUCCESS)
8061 {
8062 if ( !IEM_IS_LONG_MODE(pVCpu)
8063 || pDesc->Legacy.Gen.u1DescType)
8064 pDesc->Long.au64[1] = 0;
8065 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8066 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8067 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8068 else
8069 {
8070 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8071 /** @todo is this the right exception? */
8072 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8073 }
8074 }
8075 return rcStrict;
8076}
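
/* Worked example (illustrative): the long mode read of the high descriptor
 * half above uses GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1.  For uSel = 0x0028
 * this is GCPtrBase + 0x2f + 1 = GCPtrBase + 0x30, i.e. the same as
 * GCPtrBase + (uSel & X86_SEL_MASK) + 8 -- the second 8 bytes of the 16-byte
 * system descriptor -- obtained without a separate masking step. */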
8077
8078
8079/**
8080 * Fetches a descriptor table entry.
8081 *
8082 * @returns Strict VBox status code.
8083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8084 * @param pDesc Where to return the descriptor table entry.
8085 * @param uSel The selector which table entry to fetch.
8086 * @param uXcpt The exception to raise on table lookup error.
8087 */
8088VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8089{
8090 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8091}
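
/* Usage sketch (illustrative only): a far transfer helper validating a new CS
 * value (uNewCs, assumed to be in scope) would typically fetch the descriptor
 * with \#GP as the exception to raise on a lookup failure:
 *
 *     IEMSELDESC   DescCs;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 * The error code pushed on failure defaults to the selector with its RPL bits
 * masked off (see iemMemFetchSelDescWithErr above). */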
8092
8093
8094/**
8095 * Marks the selector descriptor as accessed (only non-system descriptors).
8096 *
8097 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8098 * will therefore skip the limit checks.
8099 *
8100 * @returns Strict VBox status code.
8101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8102 * @param uSel The selector.
8103 */
8104VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8105{
8106 /*
8107 * Get the selector table base and calculate the entry address.
8108 */
8109 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8110 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8111 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8112 GCPtr += uSel & X86_SEL_MASK;
8113
8114 /*
8115 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8116 * ugly stuff to avoid this. This will make sure it's an atomic access
8117 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8118 */
8119 VBOXSTRICTRC rcStrict;
8120 uint8_t bUnmapInfo;
8121 uint32_t volatile *pu32;
8122 if ((GCPtr & 3) == 0)
8123 {
8124 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8125 GCPtr += 2 + 2;
8126 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8127 if (rcStrict != VINF_SUCCESS)
8128 return rcStrict;
8129 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8130 }
8131 else
8132 {
8133 /* The misaligned GDT/LDT case, map the whole thing. */
8134 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8135 if (rcStrict != VINF_SUCCESS)
8136 return rcStrict;
8137 switch ((uintptr_t)pu32 & 3)
8138 {
8139 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8140 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8141 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8142 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8143 }
8144 }
8145
8146 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8147}
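
/* Worked example (illustrative): for a dword aligned descriptor the accessed
 * bit is bit 40 of the 8-byte entry (bit 0 of the type byte at offset 5).
 * Mapping 4 bytes at GCPtr + 4 places that type byte at offset 1 of the
 * mapping, so the ASMAtomicBitSet(pu32, 8) above (1 * 8 + 0) sets exactly the
 * X86_SEL_TYPE_ACCESSED bit; the misaligned branch derives the same bit
 * position relative to whatever alignment the 8-byte mapping ended up with. */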
8148
8149
8150#undef LOG_GROUP
8151#define LOG_GROUP LOG_GROUP_IEM
8152
8153/** @} */
8154
8155/** @name Opcode Helpers.
8156 * @{
8157 */
8158
8159/**
8160 * Calculates the effective address of a ModR/M memory operand.
8161 *
8162 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8163 *
8164 * @return Strict VBox status code.
8165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8166 * @param bRm The ModRM byte.
8167 * @param cbImmAndRspOffset - First byte: The size of any immediate
8168 * following the effective address opcode bytes
8169 * (only for RIP relative addressing).
8170 * - Second byte: RSP displacement (for POP [ESP]).
8171 * @param pGCPtrEff Where to return the effective address.
8172 */
8173VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8174{
8175 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8176# define SET_SS_DEF() \
8177 do \
8178 { \
8179 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8180 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8181 } while (0)
8182
8183 if (!IEM_IS_64BIT_CODE(pVCpu))
8184 {
8185/** @todo Check the effective address size crap! */
8186 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8187 {
8188 uint16_t u16EffAddr;
8189
8190 /* Handle the disp16 form with no registers first. */
8191 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8192 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8193 else
8194 {
8195 /* Get the displacement. */
8196 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8197 {
8198 case 0: u16EffAddr = 0; break;
8199 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8200 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8201 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8202 }
8203
8204 /* Add the base and index registers to the disp. */
8205 switch (bRm & X86_MODRM_RM_MASK)
8206 {
8207 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8208 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8209 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8210 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8211 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8212 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8213 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8214 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8215 }
8216 }
8217
8218 *pGCPtrEff = u16EffAddr;
8219 }
8220 else
8221 {
8222 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8223 uint32_t u32EffAddr;
8224
8225 /* Handle the disp32 form with no registers first. */
8226 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8227 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8228 else
8229 {
8230 /* Get the register (or SIB) value. */
8231 switch ((bRm & X86_MODRM_RM_MASK))
8232 {
8233 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8234 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8235 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8236 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8237 case 4: /* SIB */
8238 {
8239 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8240
8241 /* Get the index and scale it. */
8242 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8243 {
8244 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8245 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8246 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8247 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8248 case 4: u32EffAddr = 0; /*none */ break;
8249 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8250 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8251 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8252 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8253 }
8254 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8255
8256 /* add base */
8257 switch (bSib & X86_SIB_BASE_MASK)
8258 {
8259 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8260 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8261 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8262 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8263 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8264 case 5:
8265 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8266 {
8267 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8268 SET_SS_DEF();
8269 }
8270 else
8271 {
8272 uint32_t u32Disp;
8273 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8274 u32EffAddr += u32Disp;
8275 }
8276 break;
8277 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8278 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8279 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8280 }
8281 break;
8282 }
8283 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8284 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8285 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8286 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8287 }
8288
8289 /* Get and add the displacement. */
8290 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8291 {
8292 case 0:
8293 break;
8294 case 1:
8295 {
8296 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8297 u32EffAddr += i8Disp;
8298 break;
8299 }
8300 case 2:
8301 {
8302 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8303 u32EffAddr += u32Disp;
8304 break;
8305 }
8306 default:
8307 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8308 }
8309
8310 }
8311 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8312 *pGCPtrEff = u32EffAddr;
8313 }
8314 }
8315 else
8316 {
8317 uint64_t u64EffAddr;
8318
8319 /* Handle the rip+disp32 form with no registers first. */
8320 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8321 {
8322 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8323 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8324 }
8325 else
8326 {
8327 /* Get the register (or SIB) value. */
8328 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8329 {
8330 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8331 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8332 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8333 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8334 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8335 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8336 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8337 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8338 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8339 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8340 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8341 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8342 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8343 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8344 /* SIB */
8345 case 4:
8346 case 12:
8347 {
8348 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8349
8350 /* Get the index and scale it. */
8351 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8352 {
8353 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8354 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8355 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8356 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8357 case 4: u64EffAddr = 0; /*none */ break;
8358 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8359 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8360 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8361 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8362 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8363 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8364 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8365 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8366 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8367 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8368 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8369 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8370 }
8371 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8372
8373 /* add base */
8374 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8375 {
8376 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8377 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8378 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8379 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8380 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8381 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8382 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8383 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8384 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8385 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8386 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8387 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8388 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8389 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8390 /* complicated encodings */
8391 case 5:
8392 case 13:
8393 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8394 {
8395 if (!pVCpu->iem.s.uRexB)
8396 {
8397 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8398 SET_SS_DEF();
8399 }
8400 else
8401 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8402 }
8403 else
8404 {
8405 uint32_t u32Disp;
8406 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8407 u64EffAddr += (int32_t)u32Disp;
8408 }
8409 break;
8410 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8411 }
8412 break;
8413 }
8414 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8415 }
8416
8417 /* Get and add the displacement. */
8418 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8419 {
8420 case 0:
8421 break;
8422 case 1:
8423 {
8424 int8_t i8Disp;
8425 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8426 u64EffAddr += i8Disp;
8427 break;
8428 }
8429 case 2:
8430 {
8431 uint32_t u32Disp;
8432 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8433 u64EffAddr += (int32_t)u32Disp;
8434 break;
8435 }
8436 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8437 }
8438
8439 }
8440
8441 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8442 *pGCPtrEff = u64EffAddr;
8443 else
8444 {
8445 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8446 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8447 }
8448 }
8449
8450 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8451 return VINF_SUCCESS;
8452}
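
/* Parameter packing sketch (illustrative): the low byte of cbImmAndRspOffset
 * is the number of immediate bytes following the ModR/M encoding (so that
 * RIP-relative operands skip them), and the second byte is the extra ESP/RSP
 * displacement applied when the stack pointer is used as the base register.
 * A caller decoding an instruction with a byte immediate that also needs a
 * 4 byte stack adjustment would therefore do something like (GCPtrEff and bRm
 * assumed to be in scope):
 *
 *     RTGCPTR      GCPtrEff;
 *     VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 1 | (4 << 8), &GCPtrEff);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */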
8453
8454
8455#ifdef IEM_WITH_SETJMP
8456/**
8457 * Calculates the effective address of a ModR/M memory operand.
8458 *
8459 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8460 *
8461 * May longjmp on internal error.
8462 *
8463 * @return The effective address.
8464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8465 * @param bRm The ModRM byte.
8466 * @param cbImmAndRspOffset - First byte: The size of any immediate
8467 * following the effective address opcode bytes
8468 * (only for RIP relative addressing).
8469 * - Second byte: RSP displacement (for POP [ESP]).
8470 */
8471RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8472{
8473 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8474# define SET_SS_DEF() \
8475 do \
8476 { \
8477 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8478 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8479 } while (0)
8480
8481 if (!IEM_IS_64BIT_CODE(pVCpu))
8482 {
8483/** @todo Check the effective address size crap! */
8484 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8485 {
8486 uint16_t u16EffAddr;
8487
8488 /* Handle the disp16 form with no registers first. */
8489 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8490 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8491 else
8492 {
8493 /* Get the displacement. */
8494 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8495 {
8496 case 0: u16EffAddr = 0; break;
8497 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8498 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8499 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8500 }
8501
8502 /* Add the base and index registers to the disp. */
8503 switch (bRm & X86_MODRM_RM_MASK)
8504 {
8505 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8506 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8507 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8508 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8509 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8510 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8511 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8512 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8513 }
8514 }
8515
8516 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8517 return u16EffAddr;
8518 }
8519
8520 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8521 uint32_t u32EffAddr;
8522
8523 /* Handle the disp32 form with no registers first. */
8524 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8525 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8526 else
8527 {
8528 /* Get the register (or SIB) value. */
8529 switch ((bRm & X86_MODRM_RM_MASK))
8530 {
8531 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8532 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8533 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8534 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8535 case 4: /* SIB */
8536 {
8537 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8538
8539 /* Get the index and scale it. */
8540 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8541 {
8542 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8543 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8544 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8545 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8546 case 4: u32EffAddr = 0; /*none */ break;
8547 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8548 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8549 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8550 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8551 }
8552 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8553
8554 /* add base */
8555 switch (bSib & X86_SIB_BASE_MASK)
8556 {
8557 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8558 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8559 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8560 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8561 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8562 case 5:
8563 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8564 {
8565 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8566 SET_SS_DEF();
8567 }
8568 else
8569 {
8570 uint32_t u32Disp;
8571 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8572 u32EffAddr += u32Disp;
8573 }
8574 break;
8575 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8576 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8577 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8578 }
8579 break;
8580 }
8581 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8582 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8583 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8584 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8585 }
8586
8587 /* Get and add the displacement. */
8588 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8589 {
8590 case 0:
8591 break;
8592 case 1:
8593 {
8594 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8595 u32EffAddr += i8Disp;
8596 break;
8597 }
8598 case 2:
8599 {
8600 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8601 u32EffAddr += u32Disp;
8602 break;
8603 }
8604 default:
8605 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8606 }
8607 }
8608
8609 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8610 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8611 return u32EffAddr;
8612 }
8613
8614 uint64_t u64EffAddr;
8615
8616 /* Handle the rip+disp32 form with no registers first. */
8617 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8618 {
8619 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8620 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8621 }
8622 else
8623 {
8624 /* Get the register (or SIB) value. */
8625 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8626 {
8627 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8628 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8629 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8630 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8631 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8632 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8633 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8634 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8635 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8636 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8637 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8638 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8639 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8640 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8641 /* SIB */
8642 case 4:
8643 case 12:
8644 {
8645 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8646
8647 /* Get the index and scale it. */
8648 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8649 {
8650 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8651 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8652 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8653 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8654 case 4: u64EffAddr = 0; /*none */ break;
8655 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8656 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8657 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8658 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8659 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8660 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8661 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8662 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8663 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8664 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8665 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8666 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8667 }
8668 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8669
8670 /* add base */
8671 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8672 {
8673 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8674 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8675 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8676 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8677 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8678 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8679 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8680 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8681 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8682 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8683 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8684 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8685 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8686 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8687 /* complicated encodings */
8688 case 5:
8689 case 13:
8690 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8691 {
8692 if (!pVCpu->iem.s.uRexB)
8693 {
8694 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8695 SET_SS_DEF();
8696 }
8697 else
8698 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8699 }
8700 else
8701 {
8702 uint32_t u32Disp;
8703 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8704 u64EffAddr += (int32_t)u32Disp;
8705 }
8706 break;
8707 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8708 }
8709 break;
8710 }
8711 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8712 }
8713
8714 /* Get and add the displacement. */
8715 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8716 {
8717 case 0:
8718 break;
8719 case 1:
8720 {
8721 int8_t i8Disp;
8722 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8723 u64EffAddr += i8Disp;
8724 break;
8725 }
8726 case 2:
8727 {
8728 uint32_t u32Disp;
8729 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8730 u64EffAddr += (int32_t)u32Disp;
8731 break;
8732 }
8733 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8734 }
8735
8736 }
8737
8738 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8739 {
8740 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8741 return u64EffAddr;
8742 }
8743 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8744 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8745 return u64EffAddr & UINT32_MAX;
8746}
8747#endif /* IEM_WITH_SETJMP */
8748
8749
8750/**
8751 * Calculates the effective address of a ModR/M memory operand, extended version
8752 * for use in the recompilers.
8753 *
8754 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8755 *
8756 * @return Strict VBox status code.
8757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8758 * @param bRm The ModRM byte.
8759 * @param cbImmAndRspOffset - First byte: The size of any immediate
8760 * following the effective address opcode bytes
8761 * (only for RIP relative addressing).
8762 * - Second byte: RSP displacement (for POP [ESP]).
8763 * @param pGCPtrEff Where to return the effective address.
8764 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8765 * SIB byte (bits 39:32).
8766 */
8767VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8768{
8769 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8770# define SET_SS_DEF() \
8771 do \
8772 { \
8773 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8774 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8775 } while (0)
8776
8777 uint64_t uInfo;
8778 if (!IEM_IS_64BIT_CODE(pVCpu))
8779 {
8780/** @todo Check the effective address size crap! */
8781 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8782 {
8783 uint16_t u16EffAddr;
8784
8785 /* Handle the disp16 form with no registers first. */
8786 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8787 {
8788 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8789 uInfo = u16EffAddr;
8790 }
8791 else
8792 {
8793 /* Get the displacement. */
8794 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8795 {
8796 case 0: u16EffAddr = 0; break;
8797 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8798 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8799 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8800 }
8801 uInfo = u16EffAddr;
8802
8803 /* Add the base and index registers to the disp. */
8804 switch (bRm & X86_MODRM_RM_MASK)
8805 {
8806 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8807 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8808 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8809 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8810 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8811 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8812 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8813 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8814 }
8815 }
8816
8817 *pGCPtrEff = u16EffAddr;
8818 }
8819 else
8820 {
8821 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8822 uint32_t u32EffAddr;
8823
8824 /* Handle the disp32 form with no registers first. */
8825 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8826 {
8827 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8828 uInfo = u32EffAddr;
8829 }
8830 else
8831 {
8832 /* Get the register (or SIB) value. */
8833 uInfo = 0;
8834 switch ((bRm & X86_MODRM_RM_MASK))
8835 {
8836 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8837 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8838 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8839 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8840 case 4: /* SIB */
8841 {
8842 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8843 uInfo = (uint64_t)bSib << 32;
8844
8845 /* Get the index and scale it. */
8846 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8847 {
8848 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8849 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8850 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8851 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8852 case 4: u32EffAddr = 0; /* none */ break;
8853 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8854 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8855 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8856 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8857 }
8858 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8859
8860 /* add base */
8861 switch (bSib & X86_SIB_BASE_MASK)
8862 {
8863 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8864 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8865 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8866 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8867 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8868 case 5:
8869 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8870 {
8871 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8872 SET_SS_DEF();
8873 }
8874 else
8875 {
8876 uint32_t u32Disp;
8877 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8878 u32EffAddr += u32Disp;
8879 uInfo |= u32Disp;
8880 }
8881 break;
8882 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8883 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8884 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8885 }
8886 break;
8887 }
8888 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8889 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8890 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8891 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8892 }
8893
8894 /* Get and add the displacement. */
8895 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8896 {
8897 case 0:
8898 break;
8899 case 1:
8900 {
8901 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8902 u32EffAddr += i8Disp;
8903 uInfo |= (uint32_t)(int32_t)i8Disp;
8904 break;
8905 }
8906 case 2:
8907 {
8908 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8909 u32EffAddr += u32Disp;
8910 uInfo |= (uint32_t)u32Disp;
8911 break;
8912 }
8913 default:
8914 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8915 }
8916
8917 }
8918 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8919 *pGCPtrEff = u32EffAddr;
8920 }
8921 }
8922 else
8923 {
8924 uint64_t u64EffAddr;
8925
8926 /* Handle the rip+disp32 form with no registers first. */
8927 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8928 {
8929 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8930 uInfo = (uint32_t)u64EffAddr;
8931 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8932 }
8933 else
8934 {
8935 /* Get the register (or SIB) value. */
8936 uInfo = 0;
8937 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8938 {
8939 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8940 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8941 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8942 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8943 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8944 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8945 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8946 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8947 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8948 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8949 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8950 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8951 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8952 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8953 /* SIB */
8954 case 4:
8955 case 12:
8956 {
8957 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8958 uInfo = (uint64_t)bSib << 32;
8959
8960 /* Get the index and scale it. */
8961 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8962 {
8963 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8964 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8965 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8966 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8967 case 4: u64EffAddr = 0; /* none */ break;
8968 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8969 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8970 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8971 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8972 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8973 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8974 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8975 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8976 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8977 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8978 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8979 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8980 }
8981 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8982
8983 /* add base */
8984 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8985 {
8986 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8987 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8988 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8989 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8990 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8991 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8992 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8993 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8994 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8995 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8996 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8997 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8998 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8999 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9000 /* complicated encodings */
9001 case 5:
9002 case 13:
9003 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9004 {
9005 if (!pVCpu->iem.s.uRexB)
9006 {
9007 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9008 SET_SS_DEF();
9009 }
9010 else
9011 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9012 }
9013 else
9014 {
9015 uint32_t u32Disp;
9016 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9017 u64EffAddr += (int32_t)u32Disp;
9018 uInfo |= u32Disp;
9019 }
9020 break;
9021 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9022 }
9023 break;
9024 }
9025 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9026 }
9027
9028 /* Get and add the displacement. */
9029 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9030 {
9031 case 0:
9032 break;
9033 case 1:
9034 {
9035 int8_t i8Disp;
9036 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9037 u64EffAddr += i8Disp;
9038 uInfo |= (uint32_t)(int32_t)i8Disp;
9039 break;
9040 }
9041 case 2:
9042 {
9043 uint32_t u32Disp;
9044 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9045 u64EffAddr += (int32_t)u32Disp;
9046 uInfo |= u32Disp;
9047 break;
9048 }
9049 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9050 }
9051
9052 }
9053
9054 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9055 *pGCPtrEff = u64EffAddr;
9056 else
9057 {
9058 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9059 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9060 }
9061 }
9062 *puInfo = uInfo;
9063
9064 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9065 return VINF_SUCCESS;
9066}
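/*
 * Informal usage sketch (not part of the interface contract): the packing of the
 * cbImmAndRspOffset argument and of the *puInfo result follows directly from the
 * doxygen comment and the code above.  The local names cbImm, offRspPop, bRm and
 * pVCpu are assumed to be in scope at the call site.
 *
 * @code
 *      uint32_t const cbImmAndRspOffset = (uint32_t)cbImm              // byte 0: immediate size (RIP relative fixup).
 *                                       | ((uint32_t)offRspPop << 8);  // byte 1: RSP displacement (POP [xSP]).
 *      RTGCPTR      GCPtrEff;
 *      uint64_t     uInfo;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const u32Disp = (uint32_t)uInfo;         // bits 31:0:  displacement.
 *          uint8_t  const bSib    = (uint8_t)(uInfo >> 32);  // bits 39:32: SIB byte (zero when none).
 *          // ... use GCPtrEff, u32Disp and bSib ...
 *      }
 * @endcode
 */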
9067
9068/** @} */
9069
9070
9071#ifdef LOG_ENABLED
9072/**
9073 * Logs the current instruction.
9074 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9075 * @param fSameCtx Set if we have the same context information as the VMM,
9076 * clear if we may have already executed an instruction in
9077 * our debug context. When clear, we assume IEMCPU holds
9078 * valid CPU mode info.
9079 *
9080 * The @a fSameCtx parameter is now misleading and obsolete.
9081 * @param pszFunction The IEM function doing the execution.
9082 */
9083static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9084{
9085# ifdef IN_RING3
9086 if (LogIs2Enabled())
9087 {
9088 char szInstr[256];
9089 uint32_t cbInstr = 0;
9090 if (fSameCtx)
9091 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9092 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9093 szInstr, sizeof(szInstr), &cbInstr);
9094 else
9095 {
9096 uint32_t fFlags = 0;
9097 switch (IEM_GET_CPU_MODE(pVCpu))
9098 {
9099 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9100 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9101 case IEMMODE_16BIT:
9102 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9103 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9104 else
9105 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9106 break;
9107 }
9108 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9109 szInstr, sizeof(szInstr), &cbInstr);
9110 }
9111
9112 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9113 Log2(("**** %s fExec=%x\n"
9114 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9115 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9116 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9117 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9118 " %s\n"
9119 , pszFunction, pVCpu->iem.s.fExec,
9120 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9121 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9122 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9123 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9124 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9125 szInstr));
9126
9127 /* This stuff sucks atm. as it fills the log with MSRs. */
9128 //if (LogIs3Enabled())
9129 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9130 }
9131 else
9132# endif
9133 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9134 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9135 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9136}
9137#endif /* LOG_ENABLED */
9138
9139
9140#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9141/**
9142 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9143 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9144 *
9145 * @returns Modified rcStrict.
9146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9147 * @param rcStrict The instruction execution status.
9148 */
9149static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9150{
9151 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9152 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9153 {
9154 /* VMX preemption timer takes priority over NMI-window exits. */
9155 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9156 {
9157 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9158 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9159 }
9160 /*
9161 * Check remaining intercepts.
9162 *
9163 * NMI-window and Interrupt-window VM-exits.
9164 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9165 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9166 *
9167 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9168 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9169 */
9170 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9171 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9172 && !TRPMHasTrap(pVCpu))
9173 {
9174 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9175 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9176 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9177 {
9178 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9179 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9180 }
9181 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9182 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9183 {
9184 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9185 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9186 }
9187 }
9188 }
9189 /* TPR-below threshold/APIC write has the highest priority. */
9190 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9191 {
9192 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9193 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9194 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9195 }
9196 /* MTF takes priority over VMX-preemption timer. */
9197 else
9198 {
9199 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9200 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9201 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9202 }
9203 return rcStrict;
9204}
9205#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9206
9207
9208/**
9209 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9210 * IEMExecOneWithPrefetchedByPC.
9211 *
9212 * Similar code is found in IEMExecLots.
9213 *
9214 * @return Strict VBox status code.
9215 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9216 * @param fExecuteInhibit If set, execute the instruction following CLI,
9217 * POP SS and MOV SS,GR.
9218 * @param pszFunction The calling function name.
9219 */
9220DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9221{
9222 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9223 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9224 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9225 RT_NOREF_PV(pszFunction);
9226
9227#ifdef IEM_WITH_SETJMP
9228 VBOXSTRICTRC rcStrict;
9229 IEM_TRY_SETJMP(pVCpu, rcStrict)
9230 {
9231 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9232 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9233 }
9234 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9235 {
9236 pVCpu->iem.s.cLongJumps++;
9237 }
9238 IEM_CATCH_LONGJMP_END(pVCpu);
9239#else
9240 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9241 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9242#endif
9243 if (rcStrict == VINF_SUCCESS)
9244 pVCpu->iem.s.cInstructions++;
9245 if (pVCpu->iem.s.cActiveMappings > 0)
9246 {
9247 Assert(rcStrict != VINF_SUCCESS);
9248 iemMemRollback(pVCpu);
9249 }
9250 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9251 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9252 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9253
9254//#ifdef DEBUG
9255// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9256//#endif
9257
9258#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9259 /*
9260 * Perform any VMX nested-guest instruction boundary actions.
9261 *
9262 * If any of these causes a VM-exit, we must skip executing the next
9263 * instruction (would run into stale page tables). A VM-exit makes sure
9264 * there is no interrupt-inhibition, so that should ensure we don't go
9265 * to try execute the next instruction. Clearing fExecuteInhibit is
9266 * problematic because of the setjmp/longjmp clobbering above.
9267 */
9268 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9269 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9270 || rcStrict != VINF_SUCCESS)
9271 { /* likely */ }
9272 else
9273 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9274#endif
9275
9276 /* Execute the next instruction as well if a cli, pop ss or
9277 mov ss, Gr has just completed successfully. */
9278 if ( fExecuteInhibit
9279 && rcStrict == VINF_SUCCESS
9280 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9281 {
9282 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9283 if (rcStrict == VINF_SUCCESS)
9284 {
9285#ifdef LOG_ENABLED
9286 iemLogCurInstr(pVCpu, false, pszFunction);
9287#endif
9288#ifdef IEM_WITH_SETJMP
9289 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9290 {
9291 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9292 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9293 }
9294 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9295 {
9296 pVCpu->iem.s.cLongJumps++;
9297 }
9298 IEM_CATCH_LONGJMP_END(pVCpu);
9299#else
9300 IEM_OPCODE_GET_FIRST_U8(&b);
9301 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9302#endif
9303 if (rcStrict == VINF_SUCCESS)
9304 {
9305 pVCpu->iem.s.cInstructions++;
9306#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9307 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9308 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9309 { /* likely */ }
9310 else
9311 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9312#endif
9313 }
9314 if (pVCpu->iem.s.cActiveMappings > 0)
9315 {
9316 Assert(rcStrict != VINF_SUCCESS);
9317 iemMemRollback(pVCpu);
9318 }
9319 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9320 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9321 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9322 }
9323 else if (pVCpu->iem.s.cActiveMappings > 0)
9324 iemMemRollback(pVCpu);
9325 /** @todo drop this after we bake this change into RIP advancing. */
9326 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9327 }
9328
9329 /*
9330 * Return value fiddling, statistics and sanity assertions.
9331 */
9332 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9333
9334 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9336 return rcStrict;
9337}
9338
9339
9340/**
9341 * Execute one instruction.
9342 *
9343 * @return Strict VBox status code.
9344 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9345 */
9346VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9347{
9348 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9349#ifdef LOG_ENABLED
9350 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9351#endif
9352
9353 /*
9354 * Do the decoding and emulation.
9355 */
9356 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9357 if (rcStrict == VINF_SUCCESS)
9358 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9359 else if (pVCpu->iem.s.cActiveMappings > 0)
9360 iemMemRollback(pVCpu);
9361
9362 if (rcStrict != VINF_SUCCESS)
9363 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9364 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9365 return rcStrict;
9366}
9367
9368
9369VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9370{
9371 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9372 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9373 if (rcStrict == VINF_SUCCESS)
9374 {
9375 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9376 if (pcbWritten)
9377 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9378 }
9379 else if (pVCpu->iem.s.cActiveMappings > 0)
9380 iemMemRollback(pVCpu);
9381
9382 return rcStrict;
9383}
9384
9385
9386VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9387 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9388{
9389 VBOXSTRICTRC rcStrict;
9390 if ( cbOpcodeBytes
9391 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9392 {
9393 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9394#ifdef IEM_WITH_CODE_TLB
9395 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9396 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9397 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9398 pVCpu->iem.s.offCurInstrStart = 0;
9399 pVCpu->iem.s.offInstrNextByte = 0;
9400 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9401#else
9402 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9403 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9404#endif
9405 rcStrict = VINF_SUCCESS;
9406 }
9407 else
9408 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9409 if (rcStrict == VINF_SUCCESS)
9410 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9411 else if (pVCpu->iem.s.cActiveMappings > 0)
9412 iemMemRollback(pVCpu);
9413
9414 return rcStrict;
9415}
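/*
 * Informal usage sketch: a caller that already holds the opcode bytes for the
 * current RIP (e.g. from exit information) can hand them to
 * IEMExecOneWithPrefetchedByPC to skip the opcode prefetch.  GCPtrInstr,
 * abInstr and cbOpcodes below are assumed caller locals; if the guest RIP no
 * longer matches GCPtrInstr, the function silently falls back to the normal
 * prefetch path.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, &abInstr[0], cbOpcodes);
 * @endcode
 */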
9416
9417
9418VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9419{
9420 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9421 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9422 if (rcStrict == VINF_SUCCESS)
9423 {
9424 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9425 if (pcbWritten)
9426 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9427 }
9428 else if (pVCpu->iem.s.cActiveMappings > 0)
9429 iemMemRollback(pVCpu);
9430
9431 return rcStrict;
9432}
9433
9434
9435VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9436 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9437{
9438 VBOXSTRICTRC rcStrict;
9439 if ( cbOpcodeBytes
9440 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9441 {
9442 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9443#ifdef IEM_WITH_CODE_TLB
9444 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9445 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9446 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9447 pVCpu->iem.s.offCurInstrStart = 0;
9448 pVCpu->iem.s.offInstrNextByte = 0;
9449 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9450#else
9451 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9452 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9453#endif
9454 rcStrict = VINF_SUCCESS;
9455 }
9456 else
9457 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9458 if (rcStrict == VINF_SUCCESS)
9459 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9460 else if (pVCpu->iem.s.cActiveMappings > 0)
9461 iemMemRollback(pVCpu);
9462
9463 return rcStrict;
9464}
9465
9466
9467/**
9468 * For handling split cacheline lock operations when the host has split-lock
9469 * detection enabled.
9470 *
9471 * This will cause the interpreter to disregard the lock prefix and implicit
9472 * locking (xchg).
9473 *
9474 * @returns Strict VBox status code.
9475 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9476 */
9477VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9478{
9479 /*
9480 * Do the decoding and emulation.
9481 */
9482 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9483 if (rcStrict == VINF_SUCCESS)
9484 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9485 else if (pVCpu->iem.s.cActiveMappings > 0)
9486 iemMemRollback(pVCpu);
9487
9488 if (rcStrict != VINF_SUCCESS)
9489 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9490 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9491 return rcStrict;
9492}
9493
9494
9495/**
9496 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9497 * inject a pending TRPM trap.
9498 */
9499VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9500{
9501 Assert(TRPMHasTrap(pVCpu));
9502
9503 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9504 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9505 {
9506 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9507#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9508 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9509 if (fIntrEnabled)
9510 {
9511 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9512 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9513 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9514 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9515 else
9516 {
9517 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9518 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9519 }
9520 }
9521#else
9522 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9523#endif
9524 if (fIntrEnabled)
9525 {
9526 uint8_t u8TrapNo;
9527 TRPMEVENT enmType;
9528 uint32_t uErrCode;
9529 RTGCPTR uCr2;
9530 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9531 AssertRC(rc2);
9532 Assert(enmType == TRPM_HARDWARE_INT);
9533 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9534
9535 TRPMResetTrap(pVCpu);
9536
9537#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9538 /* Injecting an event may cause a VM-exit. */
9539 if ( rcStrict != VINF_SUCCESS
9540 && rcStrict != VINF_IEM_RAISED_XCPT)
9541 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9542#else
9543 NOREF(rcStrict);
9544#endif
9545 }
9546 }
9547
9548 return VINF_SUCCESS;
9549}
9550
9551
9552VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9553{
9554 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9555 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9556 Assert(cMaxInstructions > 0);
9557
9558 /*
9559 * See if there is an interrupt pending in TRPM, inject it if we can.
9560 */
9561 /** @todo What if we are injecting an exception and not an interrupt? Is that
9562 * possible here? For now we assert it is indeed only an interrupt. */
9563 if (!TRPMHasTrap(pVCpu))
9564 { /* likely */ }
9565 else
9566 {
9567 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9568 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9569 { /*likely */ }
9570 else
9571 return rcStrict;
9572 }
9573
9574 /*
9575 * Initial decoder init w/ prefetch, then setup setjmp.
9576 */
9577 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9578 if (rcStrict == VINF_SUCCESS)
9579 {
9580#ifdef IEM_WITH_SETJMP
9581 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9582 IEM_TRY_SETJMP(pVCpu, rcStrict)
9583#endif
9584 {
9585 /*
9586 * The run loop. We limit ourselves to the caller specified instruction count (cMaxInstructions).
9587 */
9588 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9589 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9590 for (;;)
9591 {
9592 /*
9593 * Log the state.
9594 */
9595#ifdef LOG_ENABLED
9596 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9597#endif
9598
9599 /*
9600 * Do the decoding and emulation.
9601 */
9602 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9603 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9604#ifdef VBOX_STRICT
9605 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9606#endif
9607 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9608 {
9609 Assert(pVCpu->iem.s.cActiveMappings == 0);
9610 pVCpu->iem.s.cInstructions++;
9611
9612#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9613 /* Perform any VMX nested-guest instruction boundary actions. */
9614 uint64_t fCpu = pVCpu->fLocalForcedActions;
9615 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9616 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9617 { /* likely */ }
9618 else
9619 {
9620 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9621 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9622 fCpu = pVCpu->fLocalForcedActions;
9623 else
9624 {
9625 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9626 break;
9627 }
9628 }
9629#endif
9630 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9631 {
9632#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9633 uint64_t fCpu = pVCpu->fLocalForcedActions;
9634#endif
9635 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9636 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9637 | VMCPU_FF_TLB_FLUSH
9638 | VMCPU_FF_UNHALT );
9639
9640 if (RT_LIKELY( ( !fCpu
9641 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9642 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9643 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9644 {
9645 if (--cMaxInstructionsGccStupidity > 0)
9646 {
9647 /* Poll timers every now and then according to the caller's specs. */
9648 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9649 || !TMTimerPollBool(pVM, pVCpu))
9650 {
9651 Assert(pVCpu->iem.s.cActiveMappings == 0);
9652 iemReInitDecoder(pVCpu);
9653 continue;
9654 }
9655 }
9656 }
9657 }
9658 Assert(pVCpu->iem.s.cActiveMappings == 0);
9659 }
9660 else if (pVCpu->iem.s.cActiveMappings > 0)
9661 iemMemRollback(pVCpu);
9662 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9663 break;
9664 }
9665 }
9666#ifdef IEM_WITH_SETJMP
9667 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9668 {
9669 if (pVCpu->iem.s.cActiveMappings > 0)
9670 iemMemRollback(pVCpu);
9671# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9672 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9673# endif
9674 pVCpu->iem.s.cLongJumps++;
9675 }
9676 IEM_CATCH_LONGJMP_END(pVCpu);
9677#endif
9678
9679 /*
9680 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9681 */
9682 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9683 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9684 }
9685 else
9686 {
9687 if (pVCpu->iem.s.cActiveMappings > 0)
9688 iemMemRollback(pVCpu);
9689
9690#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9691 /*
9692 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9693 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9694 */
9695 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9696#endif
9697 }
9698
9699 /*
9700 * Maybe re-enter raw-mode and log.
9701 */
9702 if (rcStrict != VINF_SUCCESS)
9703 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9704 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9705 if (pcInstructions)
9706 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9707 return rcStrict;
9708}
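/*
 * Informal usage sketch: cPollRate must be a power of two minus one (see the
 * assertion at the top of IEMExecLots), as it is used as a mask on the
 * remaining-instruction counter to decide when to poll the timers.  The
 * values below are illustrative only.
 *
 * @code
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
 * @endcode
 */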
9709
9710
9711/**
9712 * Interface used by EMExecuteExec, does exit statistics and limits.
9713 *
9714 * @returns Strict VBox status code.
9715 * @param pVCpu The cross context virtual CPU structure.
9716 * @param fWillExit To be defined.
9717 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9718 * @param cMaxInstructions Maximum number of instructions to execute.
9719 * @param cMaxInstructionsWithoutExits
9720 * The max number of instructions without exits.
9721 * @param pStats Where to return statistics.
9722 */
9723VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9724 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9725{
9726 NOREF(fWillExit); /** @todo define flexible exit crits */
9727
9728 /*
9729 * Initialize return stats.
9730 */
9731 pStats->cInstructions = 0;
9732 pStats->cExits = 0;
9733 pStats->cMaxExitDistance = 0;
9734 pStats->cReserved = 0;
9735
9736 /*
9737 * Initial decoder init w/ prefetch, then setup setjmp.
9738 */
9739 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9740 if (rcStrict == VINF_SUCCESS)
9741 {
9742#ifdef IEM_WITH_SETJMP
9743 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9744 IEM_TRY_SETJMP(pVCpu, rcStrict)
9745#endif
9746 {
9747#ifdef IN_RING0
9748 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9749#endif
9750 uint32_t cInstructionSinceLastExit = 0;
9751
9752 /*
9753 * The run loop. We limit ourselves to the caller specified instruction count (cMaxInstructions).
9754 */
9755 PVM pVM = pVCpu->CTX_SUFF(pVM);
9756 for (;;)
9757 {
9758 /*
9759 * Log the state.
9760 */
9761#ifdef LOG_ENABLED
9762 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9763#endif
9764
9765 /*
9766 * Do the decoding and emulation.
9767 */
9768 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9769
9770 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9771 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9772
9773 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9774 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9775 {
9776 pStats->cExits += 1;
9777 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9778 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9779 cInstructionSinceLastExit = 0;
9780 }
9781
9782 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9783 {
9784 Assert(pVCpu->iem.s.cActiveMappings == 0);
9785 pVCpu->iem.s.cInstructions++;
9786 pStats->cInstructions++;
9787 cInstructionSinceLastExit++;
9788
9789#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9790 /* Perform any VMX nested-guest instruction boundary actions. */
9791 uint64_t fCpu = pVCpu->fLocalForcedActions;
9792 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9793 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9794 { /* likely */ }
9795 else
9796 {
9797 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9798 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9799 fCpu = pVCpu->fLocalForcedActions;
9800 else
9801 {
9802 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9803 break;
9804 }
9805 }
9806#endif
9807 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9808 {
9809#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9810 uint64_t fCpu = pVCpu->fLocalForcedActions;
9811#endif
9812 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9813 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9814 | VMCPU_FF_TLB_FLUSH
9815 | VMCPU_FF_UNHALT );
9816 if (RT_LIKELY( ( ( !fCpu
9817 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9818 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9819 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9820 || pStats->cInstructions < cMinInstructions))
9821 {
9822 if (pStats->cInstructions < cMaxInstructions)
9823 {
9824 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9825 {
9826#ifdef IN_RING0
9827 if ( !fCheckPreemptionPending
9828 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9829#endif
9830 {
9831 Assert(pVCpu->iem.s.cActiveMappings == 0);
9832 iemReInitDecoder(pVCpu);
9833 continue;
9834 }
9835#ifdef IN_RING0
9836 rcStrict = VINF_EM_RAW_INTERRUPT;
9837 break;
9838#endif
9839 }
9840 }
9841 }
9842 Assert(!(fCpu & VMCPU_FF_IEM));
9843 }
9844 Assert(pVCpu->iem.s.cActiveMappings == 0);
9845 }
9846 else if (pVCpu->iem.s.cActiveMappings > 0)
9847 iemMemRollback(pVCpu);
9848 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9849 break;
9850 }
9851 }
9852#ifdef IEM_WITH_SETJMP
9853 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9854 {
9855 if (pVCpu->iem.s.cActiveMappings > 0)
9856 iemMemRollback(pVCpu);
9857 pVCpu->iem.s.cLongJumps++;
9858 }
9859 IEM_CATCH_LONGJMP_END(pVCpu);
9860#endif
9861
9862 /*
9863 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9864 */
9865 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9866 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9867 }
9868 else
9869 {
9870 if (pVCpu->iem.s.cActiveMappings > 0)
9871 iemMemRollback(pVCpu);
9872
9873#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9874 /*
9875 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9876 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9877 */
9878 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9879#endif
9880 }
9881
9882 /*
9883 * Maybe re-enter raw-mode and log.
9884 */
9885 if (rcStrict != VINF_SUCCESS)
9886 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9887 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9888 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9889 return rcStrict;
9890}
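/*
 * Informal usage sketch: the caller supplies an IEMEXECFOREXITSTATS structure
 * which the function always initializes, so it need not be cleared beforehand.
 * The instruction limits below are illustrative only.
 *
 * @code
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 1 /*cMinInstructions*/,
 *                                              4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/,
 *                                              &Stats);
 *      Log(("IEMExecForExits: %u instructions, %u exits, max exit distance %u\n",
 *           Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
 * @endcode
 */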
9891
9892
9893/**
9894 * Injects a trap, fault, abort, software interrupt or external interrupt.
9895 *
9896 * The parameter list matches TRPMQueryTrapAll pretty closely.
9897 *
9898 * @returns Strict VBox status code.
9899 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9900 * @param u8TrapNo The trap number.
9901 * @param enmType What type is it (trap/fault/abort), software
9902 * interrupt or hardware interrupt.
9903 * @param uErrCode The error code if applicable.
9904 * @param uCr2 The CR2 value if applicable.
9905 * @param cbInstr The instruction length (only relevant for
9906 * software interrupts).
9907 */
9908VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9909 uint8_t cbInstr)
9910{
9911 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9912#ifdef DBGFTRACE_ENABLED
9913 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9914 u8TrapNo, enmType, uErrCode, uCr2);
9915#endif
9916
9917 uint32_t fFlags;
9918 switch (enmType)
9919 {
9920 case TRPM_HARDWARE_INT:
9921 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9922 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9923 uErrCode = uCr2 = 0;
9924 break;
9925
9926 case TRPM_SOFTWARE_INT:
9927 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9928 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9929 uErrCode = uCr2 = 0;
9930 break;
9931
9932 case TRPM_TRAP:
9933 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
9934 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9935 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9936 if (u8TrapNo == X86_XCPT_PF)
9937 fFlags |= IEM_XCPT_FLAGS_CR2;
9938 switch (u8TrapNo)
9939 {
9940 case X86_XCPT_DF:
9941 case X86_XCPT_TS:
9942 case X86_XCPT_NP:
9943 case X86_XCPT_SS:
9944 case X86_XCPT_PF:
9945 case X86_XCPT_AC:
9946 case X86_XCPT_GP:
9947 fFlags |= IEM_XCPT_FLAGS_ERR;
9948 break;
9949 }
9950 break;
9951
9952 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9953 }
9954
9955 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9956
9957 if (pVCpu->iem.s.cActiveMappings > 0)
9958 iemMemRollback(pVCpu);
9959
9960 return rcStrict;
9961}
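/*
 * Informal usage sketch: injecting a page fault with an error code and fault
 * address.  For X86_XCPT_PF the function adds IEM_XCPT_FLAGS_ERR and
 * IEM_XCPT_FLAGS_CR2 itself, so the caller only passes the raw values; the
 * uErrCodePf and GCPtrFault names are assumed caller locals.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                            uErrCodePf, GCPtrFault, 0 /*cbInstr*/);
 * @endcode
 */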
9962
9963
9964/**
9965 * Injects the active TRPM event.
9966 *
9967 * @returns Strict VBox status code.
9968 * @param pVCpu The cross context virtual CPU structure.
9969 */
9970VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9971{
9972#ifndef IEM_IMPLEMENTS_TASKSWITCH
9973 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9974#else
9975 uint8_t u8TrapNo;
9976 TRPMEVENT enmType;
9977 uint32_t uErrCode;
9978 RTGCUINTPTR uCr2;
9979 uint8_t cbInstr;
9980 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9981 if (RT_FAILURE(rc))
9982 return rc;
9983
9984 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9985 * ICEBP \#DB injection as a special case. */
9986 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9987#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9988 if (rcStrict == VINF_SVM_VMEXIT)
9989 rcStrict = VINF_SUCCESS;
9990#endif
9991#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9992 if (rcStrict == VINF_VMX_VMEXIT)
9993 rcStrict = VINF_SUCCESS;
9994#endif
9995 /** @todo Are there any other codes that imply the event was successfully
9996 * delivered to the guest? See @bugref{6607}. */
9997 if ( rcStrict == VINF_SUCCESS
9998 || rcStrict == VINF_IEM_RAISED_XCPT)
9999 TRPMResetTrap(pVCpu);
10000
10001 return rcStrict;
10002#endif
10003}
10004
10005
10006VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10007{
10008 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10009 return VERR_NOT_IMPLEMENTED;
10010}
10011
10012
10013VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10014{
10015 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10016 return VERR_NOT_IMPLEMENTED;
10017}
10018
10019
10020/**
10021 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10022 *
10023 * This API ASSUMES that the caller has already verified that the guest code is
10024 * allowed to access the I/O port. (The I/O port is in the DX register in the
10025 * guest state.)
10026 *
10027 * @returns Strict VBox status code.
10028 * @param pVCpu The cross context virtual CPU structure.
10029 * @param cbValue The size of the I/O port access (1, 2, or 4).
10030 * @param enmAddrMode The addressing mode.
10031 * @param fRepPrefix Indicates whether a repeat prefix is used
10032 * (doesn't matter which for this instruction).
10033 * @param cbInstr The instruction length in bytes.
10034 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
10035 * @param fIoChecked Whether the access to the I/O port has been
10036 * checked or not. It's typically checked in the
10037 * HM scenario.
10038 */
10039VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10040 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10041{
10042 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10043 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10044
10045 /*
10046 * State init.
10047 */
10048 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10049
10050 /*
10051 * Switch orgy for getting to the right handler.
10052 */
10053 VBOXSTRICTRC rcStrict;
10054 if (fRepPrefix)
10055 {
10056 switch (enmAddrMode)
10057 {
10058 case IEMMODE_16BIT:
10059 switch (cbValue)
10060 {
10061 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10062 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10063 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10064 default:
10065 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10066 }
10067 break;
10068
10069 case IEMMODE_32BIT:
10070 switch (cbValue)
10071 {
10072 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10073 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10074 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10075 default:
10076 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10077 }
10078 break;
10079
10080 case IEMMODE_64BIT:
10081 switch (cbValue)
10082 {
10083 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10084 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10085 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10086 default:
10087 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10088 }
10089 break;
10090
10091 default:
10092 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10093 }
10094 }
10095 else
10096 {
10097 switch (enmAddrMode)
10098 {
10099 case IEMMODE_16BIT:
10100 switch (cbValue)
10101 {
10102 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10103 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10104 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10105 default:
10106 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10107 }
10108 break;
10109
10110 case IEMMODE_32BIT:
10111 switch (cbValue)
10112 {
10113 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10114 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10115 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10116 default:
10117 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10118 }
10119 break;
10120
10121 case IEMMODE_64BIT:
10122 switch (cbValue)
10123 {
10124 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10125 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10126 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10127 default:
10128 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10129 }
10130 break;
10131
10132 default:
10133 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10134 }
10135 }
10136
10137 if (pVCpu->iem.s.cActiveMappings)
10138 iemMemRollback(pVCpu);
10139
10140 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10141}
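/*
 * Informal usage sketch: a REP OUTSB with 32-bit address size, as an HM exit
 * handler might issue it after having already performed the I/O permission
 * checks (hence fIoChecked=true).  cbInstr and the effective segment are
 * assumed to come from the decoded exit information.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
 *                                                   true /*fRepPrefix*/, cbInstr,
 *                                                   X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
 * @endcode
 */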
10142
10143
10144/**
10145 * Interface for HM and EM for executing string I/O IN (read) instructions.
10146 *
10147 * This API ASSUMES that the caller has already verified that the guest code is
10148 * allowed to access the I/O port. (The I/O port is in the DX register in the
10149 * guest state.)
10150 *
10151 * @returns Strict VBox status code.
10152 * @param pVCpu The cross context virtual CPU structure.
10153 * @param cbValue The size of the I/O port access (1, 2, or 4).
10154 * @param enmAddrMode The addressing mode.
10155 * @param fRepPrefix Indicates whether a repeat prefix is used
10156 * (doesn't matter which for this instruction).
10157 * @param cbInstr The instruction length in bytes.
10158 * @param fIoChecked Whether the access to the I/O port has been
10159 * checked or not. It's typically checked in the
10160 * HM scenario.
10161 */
10162VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10163 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10164{
10165 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10166
10167 /*
10168 * State init.
10169 */
10170 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10171
10172 /*
10173 * Switch orgy for getting to the right handler.
10174 */
10175 VBOXSTRICTRC rcStrict;
10176 if (fRepPrefix)
10177 {
10178 switch (enmAddrMode)
10179 {
10180 case IEMMODE_16BIT:
10181 switch (cbValue)
10182 {
10183 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10184 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10185 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10186 default:
10187 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10188 }
10189 break;
10190
10191 case IEMMODE_32BIT:
10192 switch (cbValue)
10193 {
10194 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10195 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10196 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10197 default:
10198 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10199 }
10200 break;
10201
10202 case IEMMODE_64BIT:
10203 switch (cbValue)
10204 {
10205 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10206 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10207 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10208 default:
10209 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10210 }
10211 break;
10212
10213 default:
10214 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10215 }
10216 }
10217 else
10218 {
10219 switch (enmAddrMode)
10220 {
10221 case IEMMODE_16BIT:
10222 switch (cbValue)
10223 {
10224 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10225 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10226 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10227 default:
10228 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10229 }
10230 break;
10231
10232 case IEMMODE_32BIT:
10233 switch (cbValue)
10234 {
10235 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10236 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10237 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10238 default:
10239 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10240 }
10241 break;
10242
10243 case IEMMODE_64BIT:
10244 switch (cbValue)
10245 {
10246 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10247 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10248 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10249 default:
10250 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10251 }
10252 break;
10253
10254 default:
10255 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10256 }
10257 }
10258
10259 if ( pVCpu->iem.s.cActiveMappings == 0
10260 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10261 { /* likely */ }
10262 else
10263 {
10264 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10265 iemMemRollback(pVCpu);
10266 }
10267 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10268}
10269
10270
10271/**
10272 * Interface for rawmode to execute an OUT (write) instruction.
10273 *
10274 * @returns Strict VBox status code.
10275 * @param pVCpu The cross context virtual CPU structure.
10276 * @param cbInstr The instruction length in bytes.
10277 * @param u16Port The port to write to.
10278 * @param fImm Whether the port is specified using an immediate operand or
10279 * using the implicit DX register.
10280 * @param cbReg The register size.
10281 *
10282 * @remarks In ring-0 not all of the state needs to be synced in.
10283 */
10284VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10285{
10286 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10287 Assert(cbReg <= 4 && cbReg != 3);
10288
10289 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10290 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10291 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10292 Assert(!pVCpu->iem.s.cActiveMappings);
10293 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10294}
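/*
 * Informal usage sketch: emulating "out dx, al" from an exit handler.  The
 * port number comes from DX (so fImm=false) and the access size is one byte;
 * cbInstr is assumed to be taken from the decoded exit information.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, cbInstr, pVCpu->cpum.GstCtx.dx,
 *                                                false /*fImm*/, 1 /*cbReg*/);
 * @endcode
 */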
10295
10296
10297/**
10298 * Interface for rawmode to execute an IN (read) instruction.
10299 *
10300 * @returns Strict VBox status code.
10301 * @param pVCpu The cross context virtual CPU structure.
10302 * @param cbInstr The instruction length in bytes.
10303 * @param u16Port The port to read.
10304 * @param fImm Whether the port is specified using an immediate operand or
10305 * using the implicit DX.
10306 * @param cbReg The register size.
10307 */
10308VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10309{
10310 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10311 Assert(cbReg <= 4 && cbReg != 3);
10312
10313 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10314 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10315 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10316 Assert(!pVCpu->iem.s.cActiveMappings);
10317 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10318}
10319
10320
10321/**
10322 * Interface for HM and EM to write to a CRx register.
10323 *
10324 * @returns Strict VBox status code.
10325 * @param pVCpu The cross context virtual CPU structure.
10326 * @param cbInstr The instruction length in bytes.
10327 * @param iCrReg The control register number (destination).
10328 * @param iGReg The general purpose register number (source).
10329 *
10330 * @remarks In ring-0 not all of the state needs to be synced in.
10331 */
10332VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10333{
10334 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10335 Assert(iCrReg < 16);
10336 Assert(iGReg < 16);
10337
10338 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10339 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10340 Assert(!pVCpu->iem.s.cActiveMappings);
10341 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10342}
10343
10344
10345/**
10346 * Interface for HM and EM to read from a CRx register.
10347 *
10348 * @returns Strict VBox status code.
10349 * @param pVCpu The cross context virtual CPU structure.
10350 * @param cbInstr The instruction length in bytes.
10351 * @param iGReg The general purpose register number (destination).
10352 * @param iCrReg The control register number (source).
10353 *
10354 * @remarks In ring-0 not all of the state needs to be synced in.
10355 */
10356VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10357{
10358 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10359 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10360 | CPUMCTX_EXTRN_APIC_TPR);
10361 Assert(iCrReg < 16);
10362 Assert(iGReg < 16);
10363
10364 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10365 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10366 Assert(!pVCpu->iem.s.cActiveMappings);
10367 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10368}
10369
10370
10371/**
10372 * Interface for HM and EM to write to a DRx register.
10373 *
10374 * @returns Strict VBox status code.
10375 * @param pVCpu The cross context virtual CPU structure.
10376 * @param cbInstr The instruction length in bytes.
10377 * @param iDrReg The debug register number (destination).
10378 * @param iGReg The general purpose register number (source).
10379 *
10380 * @remarks In ring-0 not all of the state needs to be synced in.
10381 */
10382VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10383{
10384 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10385 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10386 Assert(iDrReg < 8);
10387 Assert(iGReg < 16);
10388
10389 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10390 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10391 Assert(!pVCpu->iem.s.cActiveMappings);
10392 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10393}
10394
10395
10396/**
10397 * Interface for HM and EM to read from a DRx register.
10398 *
10399 * @returns Strict VBox status code.
10400 * @param pVCpu The cross context virtual CPU structure.
10401 * @param cbInstr The instruction length in bytes.
10402 * @param iGReg The general purpose register number (destination).
10403 * @param iDrReg The debug register number (source).
10404 *
10405 * @remarks In ring-0 not all of the state needs to be synced in.
10406 */
10407VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10408{
10409 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10410 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10411 Assert(iDrReg < 8);
10412 Assert(iGReg < 16);
10413
10414 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10415 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10416 Assert(!pVCpu->iem.s.cActiveMappings);
10417 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10418}
10419
10420
10421/**
10422 * Interface for HM and EM to clear the CR0[TS] bit.
10423 *
10424 * @returns Strict VBox status code.
10425 * @param pVCpu The cross context virtual CPU structure.
10426 * @param cbInstr The instruction length in bytes.
10427 *
10428 * @remarks In ring-0 not all of the state needs to be synced in.
10429 */
10430VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10431{
10432 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10433
10434 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10435 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10436 Assert(!pVCpu->iem.s.cActiveMappings);
10437 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10438}
10439
10440
10441/**
10442 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10443 *
10444 * @returns Strict VBox status code.
10445 * @param pVCpu The cross context virtual CPU structure.
10446 * @param cbInstr The instruction length in bytes.
10447 * @param uValue The value to load into CR0.
10448 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10449 * memory operand. Otherwise pass NIL_RTGCPTR.
10450 *
10451 * @remarks In ring-0 not all of the state needs to be synced in.
10452 */
10453VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10454{
10455 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10456
10457 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10458 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10459 Assert(!pVCpu->iem.s.cActiveMappings);
10460 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10461}
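
/*
 * Illustration (editor's sketch): the register form of LMSW has no memory
 * operand, so NIL_RTGCPTR is passed as documented above.  Taking the source
 * value from RAX and the 3 byte instruction length are purely illustrative
 * assumptions about the caller.
 */
#if 0 /* illustration only */
    VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/,
                                               (uint16_t)pVCpu->cpum.GstCtx.rax /*uValue*/, NIL_RTGCPTR);
#endif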
10462
10463
10464/**
10465 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10466 *
10467 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10468 *
10469 * @returns Strict VBox status code.
10470 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10471 * @param cbInstr The instruction length in bytes.
10472 * @remarks In ring-0 not all of the state needs to be synced in.
10473 * @thread EMT(pVCpu)
10474 */
10475VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10476{
10477 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10478
10479 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10480 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10481 Assert(!pVCpu->iem.s.cActiveMappings);
10482 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10483}
10484
10485
10486/**
10487 * Interface for HM and EM to emulate the WBINVD instruction.
10488 *
10489 * @returns Strict VBox status code.
10490 * @param pVCpu The cross context virtual CPU structure.
10491 * @param cbInstr The instruction length in bytes.
10492 *
10493 * @remarks In ring-0 not all of the state needs to be synced in.
10494 */
10495VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10496{
10497 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10498
10499 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10500 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10501 Assert(!pVCpu->iem.s.cActiveMappings);
10502 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10503}
10504
10505
10506/**
10507 * Interface for HM and EM to emulate the INVD instruction.
10508 *
10509 * @returns Strict VBox status code.
10510 * @param pVCpu The cross context virtual CPU structure.
10511 * @param cbInstr The instruction length in bytes.
10512 *
10513 * @remarks In ring-0 not all of the state needs to be synced in.
10514 */
10515VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10516{
10517 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10518
10519 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10520 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10521 Assert(!pVCpu->iem.s.cActiveMappings);
10522 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10523}
10524
10525
10526/**
10527 * Interface for HM and EM to emulate the INVLPG instruction.
10528 *
10529 * @returns Strict VBox status code.
10530 * @retval VINF_PGM_SYNC_CR3
10531 *
10532 * @param pVCpu The cross context virtual CPU structure.
10533 * @param cbInstr The instruction length in bytes.
10534 * @param GCPtrPage The effective address of the page to invalidate.
10535 *
10536 * @remarks In ring-0 not all of the state needs to be synced in.
10537 */
10538VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10539{
10540 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10541
10542 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10543 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10544 Assert(!pVCpu->iem.s.cActiveMappings);
10545 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10546}
10547
10548
10549/**
10550 * Interface for HM and EM to emulate the INVPCID instruction.
10551 *
10552 * @returns Strict VBox status code.
10553 * @retval VINF_PGM_SYNC_CR3
10554 *
10555 * @param pVCpu The cross context virtual CPU structure.
10556 * @param cbInstr The instruction length in bytes.
10557 * @param iEffSeg The effective segment register.
10558 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10559 * @param uType The invalidation type.
10560 *
10561 * @remarks In ring-0 not all of the state needs to be synced in.
10562 */
10563VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10564 uint64_t uType)
10565{
10566 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10567
10568 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10569 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10570 Assert(!pVCpu->iem.s.cActiveMappings);
10571 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10572}
10573
10574
10575/**
10576 * Interface for HM and EM to emulate the CPUID instruction.
10577 *
10578 * @returns Strict VBox status code.
10579 *
10580 * @param pVCpu The cross context virtual CPU structure.
10581 * @param cbInstr The instruction length in bytes.
10582 *
10583 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
10584 */
10585VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10586{
10587 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10588 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10589
10590 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10591 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10592 Assert(!pVCpu->iem.s.cActiveMappings);
10593 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10594}
10595
10596
10597/**
10598 * Interface for HM and EM to emulate the RDPMC instruction.
10599 *
10600 * @returns Strict VBox status code.
10601 *
10602 * @param pVCpu The cross context virtual CPU structure.
10603 * @param cbInstr The instruction length in bytes.
10604 *
10605 * @remarks Not all of the state needs to be synced in.
10606 */
10607VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10608{
10609 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10610 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10611
10612 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10613 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10614 Assert(!pVCpu->iem.s.cActiveMappings);
10615 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10616}
10617
10618
10619/**
10620 * Interface for HM and EM to emulate the RDTSC instruction.
10621 *
10622 * @returns Strict VBox status code.
10623 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10624 *
10625 * @param pVCpu The cross context virtual CPU structure.
10626 * @param cbInstr The instruction length in bytes.
10627 *
10628 * @remarks Not all of the state needs to be synced in.
10629 */
10630VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10631{
10632 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10633 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10634
10635 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10636 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10637 Assert(!pVCpu->iem.s.cActiveMappings);
10638 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10639}
10640
10641
10642/**
10643 * Interface for HM and EM to emulate the RDTSCP instruction.
10644 *
10645 * @returns Strict VBox status code.
10646 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10647 *
10648 * @param pVCpu The cross context virtual CPU structure.
10649 * @param cbInstr The instruction length in bytes.
10650 *
10651 * @remarks Not all of the state needs to be synced in.  Recommended to
10652 *          include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10653 */
10654VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10655{
10656 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10657 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10658
10659 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10660 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10661 Assert(!pVCpu->iem.s.cActiveMappings);
10662 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10663}
10664
10665
10666/**
10667 * Interface for HM and EM to emulate the RDMSR instruction.
10668 *
10669 * @returns Strict VBox status code.
10670 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10671 *
10672 * @param pVCpu The cross context virtual CPU structure.
10673 * @param cbInstr The instruction length in bytes.
10674 *
10675 * @remarks Not all of the state needs to be synced in. Requires RCX and
10676 * (currently) all MSRs.
10677 */
10678VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10679{
10680 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10681 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10682
10683 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10684 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10685 Assert(!pVCpu->iem.s.cActiveMappings);
10686 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10687}
10688
10689
10690/**
10691 * Interface for HM and EM to emulate the WRMSR instruction.
10692 *
10693 * @returns Strict VBox status code.
10694 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10695 *
10696 * @param pVCpu The cross context virtual CPU structure.
10697 * @param cbInstr The instruction length in bytes.
10698 *
10699 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10700 * and (currently) all MSRs.
10701 */
10702VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10703{
10704 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10705 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10706 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10707
10708 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10709 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10710 Assert(!pVCpu->iem.s.cActiveMappings);
10711 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10712}
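
/*
 * Illustration (editor's sketch): the IEM_CTX_ASSERT above requires RCX, RAX,
 * RDX and the MSR state to be present in the guest context before the call.
 * hmExampleImportGuestState() is a hypothetical stand-in for whatever backend
 * specific state import routine a real ring-0 caller would use.
 */
#if 0 /* illustration only */
    int rc = hmExampleImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
                                            | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX
                                            | CPUMCTX_EXTRN_ALL_MSRS);
    AssertRCReturn(rc, rc);
    VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/);
#endif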
10713
10714
10715/**
10716 * Interface for HM and EM to emulate the MONITOR instruction.
10717 *
10718 * @returns Strict VBox status code.
10719 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10720 *
10721 * @param pVCpu The cross context virtual CPU structure.
10722 * @param cbInstr The instruction length in bytes.
10723 *
10724 * @remarks Not all of the state needs to be synced in.
10725 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10726 * are used.
10727 */
10728VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10729{
10730 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10731 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10732
10733 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10734 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10735 Assert(!pVCpu->iem.s.cActiveMappings);
10736 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10737}
10738
10739
10740/**
10741 * Interface for HM and EM to emulate the MWAIT instruction.
10742 *
10743 * @returns Strict VBox status code.
10744 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10745 *
10746 * @param pVCpu The cross context virtual CPU structure.
10747 * @param cbInstr The instruction length in bytes.
10748 *
10749 * @remarks Not all of the state needs to be synced in.
10750 */
10751VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10752{
10753 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10754 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10755
10756 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10757 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10758 Assert(!pVCpu->iem.s.cActiveMappings);
10759 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10760}
10761
10762
10763/**
10764 * Interface for HM and EM to emulate the HLT instruction.
10765 *
10766 * @returns Strict VBox status code.
10767 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10768 *
10769 * @param pVCpu The cross context virtual CPU structure.
10770 * @param cbInstr The instruction length in bytes.
10771 *
10772 * @remarks Not all of the state needs to be synced in.
10773 */
10774VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10775{
10776 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10777
10778 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10779 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10780 Assert(!pVCpu->iem.s.cActiveMappings);
10781 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10782}
10783
10784
10785/**
10786 * Checks if IEM is in the process of delivering an event (interrupt or
10787 * exception).
10788 *
10789 * @returns true if we're in the process of raising an interrupt or exception,
10790 * false otherwise.
10791 * @param pVCpu The cross context virtual CPU structure.
10792 * @param puVector Where to store the vector associated with the
10793 * currently delivered event, optional.
10794 * @param puVector Where to store the vector associated with the
10795 * currently delivered event, optional.
10796 * @param pfFlags Where to store the event delivery flags (see
10795 * IEM_XCPT_FLAGS_XXX), optional.
10796 * @param puErr Where to store the error code associated with the
10797 * event, optional.
10798 * @param puCr2 Where to store the CR2 associated with the event,
10799 * optional.
10800 * @remarks The caller should check the flags to determine if the error code and
10801 * CR2 are valid for the event.
10802 */
10803VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10804{
10805 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10806 if (fRaisingXcpt)
10807 {
10808 if (puVector)
10809 *puVector = pVCpu->iem.s.uCurXcpt;
10810 if (pfFlags)
10811 *pfFlags = pVCpu->iem.s.fCurXcpt;
10812 if (puErr)
10813 *puErr = pVCpu->iem.s.uCurXcptErr;
10814 if (puCr2)
10815 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10816 }
10817 return fRaisingXcpt;
10818}
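
/*
 * Illustration (editor's sketch): querying whether IEM is currently delivering
 * an event, e.g. when a caller needs to record pending-event information.  Per
 * the remark above, the flags must be consulted before trusting uErr and uCr2;
 * the logging here is illustrative only.
 */
#if 0 /* illustration only */
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("IEM is delivering vector %#x (fFlags=%#x uErr=%#x uCr2=%RX64)\n", uVector, fFlags, uErr, uCr2));
#endif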
10819
10820#ifdef IN_RING3
10821
10822/**
10823 * Handles the unlikely and probably fatal merge cases.
10824 *
10825 * @returns Merged status code.
10826 * @param rcStrict Current EM status code.
10827 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10828 * with @a rcStrict.
10829 * @param iMemMap The memory mapping index. For error reporting only.
10830 * @param pVCpu The cross context virtual CPU structure of the calling
10831 * thread, for error reporting only.
10832 */
10833DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10834 unsigned iMemMap, PVMCPUCC pVCpu)
10835{
10836 if (RT_FAILURE_NP(rcStrict))
10837 return rcStrict;
10838
10839 if (RT_FAILURE_NP(rcStrictCommit))
10840 return rcStrictCommit;
10841
10842 if (rcStrict == rcStrictCommit)
10843 return rcStrictCommit;
10844
10845 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10846 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10847 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10849 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10850 return VERR_IOM_FF_STATUS_IPE;
10851}
10852
10853
10854/**
10855 * Helper for IOMR3ProcessForceFlag.
10856 *
10857 * @returns Merged status code.
10858 * @param rcStrict Current EM status code.
10859 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10860 * with @a rcStrict.
10861 * @param iMemMap The memory mapping index. For error reporting only.
10862 * @param pVCpu The cross context virtual CPU structure of the calling
10863 * thread, for error reporting only.
10864 */
10865DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10866{
10867 /* Simple. */
10868 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10869 return rcStrictCommit;
10870
10871 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10872 return rcStrict;
10873
10874 /* EM scheduling status codes. */
10875 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10876 && rcStrict <= VINF_EM_LAST))
10877 {
10878 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10879 && rcStrictCommit <= VINF_EM_LAST))
10880 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10881 }
10882
10883 /* Unlikely */
10884 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10885}
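
/*
 * Illustration (editor's sketch): two outcomes that follow directly from the
 * merge logic above: a plain-success incoming status yields the commit status,
 * and a plain-success commit status preserves the incoming status.
 */
#if 0 /* illustration only */
    Assert(iemR3MergeStatus(VINF_SUCCESS,        VINF_EM_RAW_TO_R3, 0 /*iMemMap*/, pVCpu) == VINF_EM_RAW_TO_R3);
    Assert(iemR3MergeStatus(VINF_EM_DBG_STEPPED, VINF_SUCCESS,      0 /*iMemMap*/, pVCpu) == VINF_EM_DBG_STEPPED);
#endif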
10886
10887
10888/**
10889 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10890 *
10891 * @returns Merge between @a rcStrict and what the commit operation returned.
10892 * @param pVM The cross context VM structure.
10893 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10894 * @param rcStrict The status code returned by ring-0 or raw-mode.
10895 */
10896VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10897{
10898 /*
10899 * Reset the pending commit.
10900 */
10901 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10902 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10903 ("%#x %#x %#x\n",
10904 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10905 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10906
10907 /*
10908 * Commit the pending bounce buffers (usually just one).
10909 */
10910 unsigned cBufs = 0;
10911 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10912 while (iMemMap-- > 0)
10913 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10914 {
10915 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10916 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10917 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10918
10919 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10920 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10921 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10922
10923 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10924 {
10925 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10926 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10927 pbBuf,
10928 cbFirst,
10929 PGMACCESSORIGIN_IEM);
10930 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10931 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10932 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10933 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10934 }
10935
10936 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10937 {
10938 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10939 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10940 pbBuf + cbFirst,
10941 cbSecond,
10942 PGMACCESSORIGIN_IEM);
10943 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10944 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10945 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10946 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10947 }
10948 cBufs++;
10949 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10950 }
10951
10952 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10953 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10954 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10955 pVCpu->iem.s.cActiveMappings = 0;
10956 return rcStrict;
10957}
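
/*
 * Illustration (editor's sketch): how a ring-3 caller might react to the
 * VMCPU_FF_IEM force-flag.  The surrounding context (where rcStrict comes from
 * and when the check runs) is an assumption; only the flag name and the
 * IEMR3ProcessForceFlag() call are taken from the code above.
 */
#if 0 /* illustration only */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif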
10958
10959#endif /* IN_RING3 */
10960