VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@96811

Last change on this file since 96811 was 96811, checked in by vboxsync, 20 months ago

VMM,IPRT,VBoxGuest,SUPDrv: Added a more efficient interface for guest logging using the CPUID instruction. This is mainly intended for development use and not enabled by default. Requires updating host drivers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 459.6 KB
1/* $Id: IEMAll.cpp 96811 2022-09-21 13:23:31Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 */
86
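/*
 * Editor's note: an illustrative sketch (not part of the original file) of how
 * the IEM logging levels documented above map onto the VBox logging macros
 * once LOG_GROUP is defined as LOG_GROUP_IEM, as this file does further down.
 * The function name and its argument are made up for the example.
 */
#if 0 /* example only */
# define LOG_GROUP LOG_GROUP_IEM
# include <iprt/types.h>
# include <VBox/log.h>
static void iemLogLevelExample(uint64_t uRip)
{
    Log(("Example: major event (exception/interrupt) at %RX64\n", uRip));  /* level 1  */
    LogFlow(("Example: basic enter/exit state info\n"));                    /* flow     */
    Log4(("Example: decoded mnemonic w/ EIP, rip=%RX64\n", uRip));          /* level 4  */
    Log8(("Example: memory write\n"));                                      /* level 8  */
    Log10(("Example: TLB activity\n"));                                     /* level 10 */
}
#endif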
87/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
88#ifdef _MSC_VER
89# pragma warning(disable:4505)
90#endif
91
92
93/*********************************************************************************************************************************
94* Header Files *
95*********************************************************************************************************************************/
96#define LOG_GROUP LOG_GROUP_IEM
97#define VMCPU_INCL_CPUM_GST_CTX
98#include <VBox/vmm/iem.h>
99#include <VBox/vmm/cpum.h>
100#include <VBox/vmm/apic.h>
101#include <VBox/vmm/pdm.h>
102#include <VBox/vmm/pgm.h>
103#include <VBox/vmm/iom.h>
104#include <VBox/vmm/em.h>
105#include <VBox/vmm/hm.h>
106#include <VBox/vmm/nem.h>
107#include <VBox/vmm/gim.h>
108#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
109# include <VBox/vmm/em.h>
110# include <VBox/vmm/hm_svm.h>
111#endif
112#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
113# include <VBox/vmm/hmvmxinline.h>
114#endif
115#include <VBox/vmm/tm.h>
116#include <VBox/vmm/dbgf.h>
117#include <VBox/vmm/dbgftrace.h>
118#include "IEMInternal.h"
119#include <VBox/vmm/vmcc.h>
120#include <VBox/log.h>
121#include <VBox/err.h>
122#include <VBox/param.h>
123#include <VBox/dis.h>
124#include <VBox/disopcode.h>
125#include <iprt/asm-math.h>
126#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
127# include <iprt/asm-amd64-x86.h>
128#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
129# include <iprt/asm-arm.h>
130#endif
131#include <iprt/assert.h>
132#include <iprt/string.h>
133#include <iprt/x86.h>
134
135#include "IEMInline.h"
136
137
138/*********************************************************************************************************************************
139* Structures and Typedefs *
140*********************************************************************************************************************************/
141/**
142 * CPU exception classes.
143 */
144typedef enum IEMXCPTCLASS
145{
146 IEMXCPTCLASS_BENIGN,
147 IEMXCPTCLASS_CONTRIBUTORY,
148 IEMXCPTCLASS_PAGE_FAULT,
149 IEMXCPTCLASS_DOUBLE_FAULT
150} IEMXCPTCLASS;
151
152
153/*********************************************************************************************************************************
154* Global Variables *
155*********************************************************************************************************************************/
156#if defined(IEM_LOG_MEMORY_WRITES)
157/** What IEM just wrote. */
158uint8_t g_abIemWrote[256];
159/** How much IEM just wrote. */
160size_t g_cbIemWrote;
161#endif
162
163
164/*********************************************************************************************************************************
165* Internal Functions *
166*********************************************************************************************************************************/
167static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
168 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
169
170
171/**
172 * Initializes the decoder state.
173 *
174 * iemReInitDecoder is mostly a copy of this function.
175 *
176 * @param pVCpu The cross context virtual CPU structure of the
177 * calling thread.
178 * @param fBypassHandlers Whether to bypass access handlers.
179 * @param fDisregardLock Whether to disregard the LOCK prefix.
180 */
181DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
182{
183 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
184 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
185 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
186 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
187 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
188 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
189 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
191 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
192 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
193
194 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
195 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
196 pVCpu->iem.s.enmCpuMode = enmMode;
197 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
198 pVCpu->iem.s.enmEffAddrMode = enmMode;
199 if (enmMode != IEMMODE_64BIT)
200 {
201 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
202 pVCpu->iem.s.enmEffOpSize = enmMode;
203 }
204 else
205 {
206 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
207 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
208 }
209 pVCpu->iem.s.fPrefixes = 0;
210 pVCpu->iem.s.uRexReg = 0;
211 pVCpu->iem.s.uRexB = 0;
212 pVCpu->iem.s.uRexIndex = 0;
213 pVCpu->iem.s.idxPrefix = 0;
214 pVCpu->iem.s.uVex3rdReg = 0;
215 pVCpu->iem.s.uVexLength = 0;
216 pVCpu->iem.s.fEvexStuff = 0;
217 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
218#ifdef IEM_WITH_CODE_TLB
219 pVCpu->iem.s.pbInstrBuf = NULL;
220 pVCpu->iem.s.offInstrNextByte = 0;
221 pVCpu->iem.s.offCurInstrStart = 0;
222# ifdef VBOX_STRICT
223 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
224 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
225 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
226# endif
227#else
228 pVCpu->iem.s.offOpcode = 0;
229 pVCpu->iem.s.cbOpcode = 0;
230#endif
231 pVCpu->iem.s.offModRm = 0;
232 pVCpu->iem.s.cActiveMappings = 0;
233 pVCpu->iem.s.iNextMapping = 0;
234 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
235 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
236 pVCpu->iem.s.fDisregardLock = fDisregardLock;
237
238#ifdef DBGFTRACE_ENABLED
239 switch (enmMode)
240 {
241 case IEMMODE_64BIT:
242 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
243 break;
244 case IEMMODE_32BIT:
245 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
246 break;
247 case IEMMODE_16BIT:
248 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
249 break;
250 }
251#endif
252}
253
254
255/**
256 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
257 *
258 * This is mostly a copy of iemInitDecoder.
259 *
260 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
261 */
262DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
263{
264 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
265 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
266 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
267 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
268 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
269 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
270 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
271 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
273
274 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
275 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
276 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
277 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
278 pVCpu->iem.s.enmEffAddrMode = enmMode;
279 if (enmMode != IEMMODE_64BIT)
280 {
281 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
282 pVCpu->iem.s.enmEffOpSize = enmMode;
283 }
284 else
285 {
286 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
287 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
288 }
289 pVCpu->iem.s.fPrefixes = 0;
290 pVCpu->iem.s.uRexReg = 0;
291 pVCpu->iem.s.uRexB = 0;
292 pVCpu->iem.s.uRexIndex = 0;
293 pVCpu->iem.s.idxPrefix = 0;
294 pVCpu->iem.s.uVex3rdReg = 0;
295 pVCpu->iem.s.uVexLength = 0;
296 pVCpu->iem.s.fEvexStuff = 0;
297 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
298#ifdef IEM_WITH_CODE_TLB
299 if (pVCpu->iem.s.pbInstrBuf)
300 {
301 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
302 - pVCpu->iem.s.uInstrBufPc;
303 if (off < pVCpu->iem.s.cbInstrBufTotal)
304 {
305 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
306 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
307 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
308 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
309 else
310 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
311 }
312 else
313 {
314 pVCpu->iem.s.pbInstrBuf = NULL;
315 pVCpu->iem.s.offInstrNextByte = 0;
316 pVCpu->iem.s.offCurInstrStart = 0;
317 pVCpu->iem.s.cbInstrBuf = 0;
318 pVCpu->iem.s.cbInstrBufTotal = 0;
319 }
320 }
321 else
322 {
323 pVCpu->iem.s.offInstrNextByte = 0;
324 pVCpu->iem.s.offCurInstrStart = 0;
325 pVCpu->iem.s.cbInstrBuf = 0;
326 pVCpu->iem.s.cbInstrBufTotal = 0;
327 }
328#else
329 pVCpu->iem.s.cbOpcode = 0;
330 pVCpu->iem.s.offOpcode = 0;
331#endif
332 pVCpu->iem.s.offModRm = 0;
333 Assert(pVCpu->iem.s.cActiveMappings == 0);
334 pVCpu->iem.s.iNextMapping = 0;
335 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
336 Assert(pVCpu->iem.s.fBypassHandlers == false);
337
338#ifdef DBGFTRACE_ENABLED
339 switch (enmMode)
340 {
341 case IEMMODE_64BIT:
342 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
343 break;
344 case IEMMODE_32BIT:
345 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
346 break;
347 case IEMMODE_16BIT:
348 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
349 break;
350 }
351#endif
352}
353
354
355
356/**
357 * Prefetches opcodes the first time, when starting execution.
358 *
359 * @returns Strict VBox status code.
360 * @param pVCpu The cross context virtual CPU structure of the
361 * calling thread.
362 * @param fBypassHandlers Whether to bypass access handlers.
363 * @param fDisregardLock Whether to disregard LOCK prefixes.
364 *
365 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
366 * store them as such.
367 */
368static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
369{
370 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
371
372#ifdef IEM_WITH_CODE_TLB
373 /** @todo Do ITLB lookup here. */
374
375#else /* !IEM_WITH_CODE_TLB */
376
377 /*
378 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
379 *
380 * First translate CS:rIP to a physical address.
381 */
382 uint32_t cbToTryRead;
383 RTGCPTR GCPtrPC;
384 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
385 {
386 cbToTryRead = GUEST_PAGE_SIZE;
387 GCPtrPC = pVCpu->cpum.GstCtx.rip;
388 if (IEM_IS_CANONICAL(GCPtrPC))
389 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
390 else
391 return iemRaiseGeneralProtectionFault0(pVCpu);
392 }
393 else
394 {
395 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
396 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
397 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
398 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
399 else
400 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
401 if (cbToTryRead) { /* likely */ }
402 else /* overflowed */
403 {
404 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
405 cbToTryRead = UINT32_MAX;
406 }
407 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
408 Assert(GCPtrPC <= UINT32_MAX);
409 }
410
411 PGMPTWALK Walk;
412 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
413 if (RT_SUCCESS(rc))
414 Assert(Walk.fSucceeded); /* probable. */
415 else
416 {
417 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
418#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
419 if (Walk.fFailed & PGM_WALKFAIL_EPT)
420 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
421#endif
422 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
423 }
424 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
425 else
426 {
427 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
428#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
429 if (Walk.fFailed & PGM_WALKFAIL_EPT)
430 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
431#endif
432 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
433 }
434 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
435 else
436 {
437 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
438#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
439 if (Walk.fFailed & PGM_WALKFAIL_EPT)
440 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
441#endif
442 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
443 }
444 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
445 /** @todo Check reserved bits and such stuff. PGM is better at doing
446 * that, so do it when implementing the guest virtual address
447 * TLB... */
448
449 /*
450 * Read the bytes at this address.
451 */
452 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
453 if (cbToTryRead > cbLeftOnPage)
454 cbToTryRead = cbLeftOnPage;
455 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
456 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
457
458 if (!pVCpu->iem.s.fBypassHandlers)
459 {
460 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
461 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
462 { /* likely */ }
463 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
464 {
465 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
466 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
467 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
468 }
469 else
470 {
471 Log((RT_SUCCESS(rcStrict)
472 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
473 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
474 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
475 return rcStrict;
476 }
477 }
478 else
479 {
480 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
481 if (RT_SUCCESS(rc))
482 { /* likely */ }
483 else
484 {
485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
486 GCPtrPC, GCPhys, cbToTryRead, rc));
487 return rc;
488 }
489 }
490 pVCpu->iem.s.cbOpcode = cbToTryRead;
491#endif /* !IEM_WITH_CODE_TLB */
492 return VINF_SUCCESS;
493}
494
495
496/**
497 * Invalidates the IEM TLBs.
498 *
499 * This is called internally as well as by PGM when moving GC mappings.
500 *
501 *
502 * @param pVCpu The cross context virtual CPU structure of the calling
503 * thread.
504 */
505VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
506{
507#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
508 Log10(("IEMTlbInvalidateAll\n"));
509# ifdef IEM_WITH_CODE_TLB
510 pVCpu->iem.s.cbInstrBufTotal = 0;
511 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
512 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
513 { /* very likely */ }
514 else
515 {
516 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
517 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
518 while (i-- > 0)
519 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
520 }
521# endif
522
523# ifdef IEM_WITH_DATA_TLB
524 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
525 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
526 { /* very likely */ }
527 else
528 {
529 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
530 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
531 while (i-- > 0)
532 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
533 }
534# endif
535#else
536 RT_NOREF(pVCpu);
537#endif
538}
539
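/*
 * Editor's note: an illustrative sketch (not part of the original file) of the
 * revision trick used by IEMTlbInvalidateAll above.  A TLB tag is the page
 * number combined with the current revision, so bumping the revision makes
 * every existing entry mismatch in O(1); only on the rare counter wrap-around
 * do the tags need to be cleared explicitly.  The EXAMPLETLB type and the
 * entry count below are simplified stand-ins, not the real IEMTLB layout.
 */
#if 0 /* example only */
# include <iprt/types.h>
typedef struct EXAMPLETLB
{
    uint64_t uRevision;      /* revision bits OR'ed into every valid tag  */
    uint64_t auTags[256];    /* tag = (page number) | uRevision           */
} EXAMPLETLB;

static void exampleTlbInvalidateAll(EXAMPLETLB *pTlb, uint64_t uRevIncr)
{
    pTlb->uRevision += uRevIncr;        /* all existing tags now mismatch */
    if (pTlb->uRevision != 0)
    { /* very likely */ }
    else
    {
        /* Wrapped around: reset the revision and really clear the tags. */
        pTlb->uRevision = uRevIncr;
        for (unsigned i = 0; i < RT_ELEMENTS(pTlb->auTags); i++)
            pTlb->auTags[i] = 0;
    }
}
#endif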
540
541/**
542 * Invalidates a page in the TLBs.
543 *
544 * @param pVCpu The cross context virtual CPU structure of the calling
545 * thread.
546 * @param GCPtr The address of the page to invalidate.
547 * @thread EMT(pVCpu)
548 */
549VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
550{
551#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
552 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
553 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
554 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
555 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
556
557# ifdef IEM_WITH_CODE_TLB
558 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
559 {
560 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
561 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
562 pVCpu->iem.s.cbInstrBufTotal = 0;
563 }
564# endif
565
566# ifdef IEM_WITH_DATA_TLB
567 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
568 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
569# endif
570#else
571 NOREF(pVCpu); NOREF(GCPtr);
572#endif
573}
574
575
576#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
577/**
578 * Invalidates both TLBs the slow way following a rollover.
579 *
580 * Worker for IEMTlbInvalidateAllPhysical,
581 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
582 * iemMemMapJmp and others.
583 *
584 * @thread EMT(pVCpu)
585 */
586static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
587{
588 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
589 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
590 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
591
592 unsigned i;
593# ifdef IEM_WITH_CODE_TLB
594 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
595 while (i-- > 0)
596 {
597 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
598 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
599 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
600 }
601# endif
602# ifdef IEM_WITH_DATA_TLB
603 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
604 while (i-- > 0)
605 {
606 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
607 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
608 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
609 }
610# endif
611
612}
613#endif
614
615
616/**
617 * Invalidates the host physical aspects of the IEM TLBs.
618 *
619 * This is called internally as well as by PGM when moving GC mappings.
620 *
621 * @param pVCpu The cross context virtual CPU structure of the calling
622 * thread.
623 * @note Currently not used.
624 */
625VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
626{
627#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
628 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
629 Log10(("IEMTlbInvalidateAllPhysical\n"));
630
631# ifdef IEM_WITH_CODE_TLB
632 pVCpu->iem.s.cbInstrBufTotal = 0;
633# endif
634 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
635 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
636 {
637 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
638 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
639 }
640 else
641 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
642#else
643 NOREF(pVCpu);
644#endif
645}
646
647
648/**
649 * Invalidates the host physical aspects of the IEM TLBs.
650 *
651 * This is called internally as well as by PGM when moving GC mappings.
652 *
653 * @param pVM The cross context VM structure.
654 * @param idCpuCaller The ID of the calling EMT if available to the caller,
655 * otherwise NIL_VMCPUID.
656 *
657 * @remarks Caller holds the PGM lock.
658 */
659VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
660{
661#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
662 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
663 if (pVCpuCaller)
664 VMCPU_ASSERT_EMT(pVCpuCaller);
665 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
666
667 VMCC_FOR_EACH_VMCPU(pVM)
668 {
669# ifdef IEM_WITH_CODE_TLB
670 if (pVCpuCaller == pVCpu)
671 pVCpu->iem.s.cbInstrBufTotal = 0;
672# endif
673
674 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
675 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
676 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
677 { /* likely */}
678 else if (pVCpuCaller == pVCpu)
679 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
680 else
681 {
682 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
683 continue;
684 }
685 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
686 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
687 }
688 VMCC_FOR_EACH_VMCPU_END(pVM);
689
690#else
691 RT_NOREF(pVM, idCpuCaller);
692#endif
693}
694
695#ifdef IEM_WITH_CODE_TLB
696
697/**
698 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
699 * failure and jumps.
700 *
701 * We end up here for a number of reasons:
702 * - pbInstrBuf isn't yet initialized.
703 * - Advancing beyond the buffer boundary (e.g. cross page).
704 * - Advancing beyond the CS segment limit.
705 * - Fetching from non-mappable page (e.g. MMIO).
706 *
707 * @param pVCpu The cross context virtual CPU structure of the
708 * calling thread.
709 * @param pvDst Where to return the bytes.
710 * @param cbDst Number of bytes to read.
711 *
712 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
713 */
714void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
715{
716#ifdef IN_RING3
717 for (;;)
718 {
719 Assert(cbDst <= 8);
720 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
721
722 /*
723 * We might have a partial buffer match, deal with that first to make the
724 * rest simpler. This is the first part of the cross page/buffer case.
725 */
726 if (pVCpu->iem.s.pbInstrBuf != NULL)
727 {
728 if (offBuf < pVCpu->iem.s.cbInstrBuf)
729 {
730 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
731 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
732 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
733
734 cbDst -= cbCopy;
735 pvDst = (uint8_t *)pvDst + cbCopy;
736 offBuf += cbCopy;
737 pVCpu->iem.s.offInstrNextByte += cbCopy;
738 }
739 }
740
741 /*
742 * Check segment limit, figuring how much we're allowed to access at this point.
743 *
744 * We will fault immediately if RIP is past the segment limit / in non-canonical
745 * territory. If we do continue, there are one or more bytes to read before we
746 * end up in trouble and we need to do that first before faulting.
747 */
748 RTGCPTR GCPtrFirst;
749 uint32_t cbMaxRead;
750 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
751 {
752 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
753 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
754 { /* likely */ }
755 else
756 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
757 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
758 }
759 else
760 {
761 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
762 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
763 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
764 { /* likely */ }
765 else
766 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
767 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
768 if (cbMaxRead != 0)
769 { /* likely */ }
770 else
771 {
772 /* Overflowed because address is 0 and limit is max. */
773 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
774 cbMaxRead = X86_PAGE_SIZE;
775 }
776 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
777 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
778 if (cbMaxRead2 < cbMaxRead)
779 cbMaxRead = cbMaxRead2;
780 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
781 }
782
783 /*
784 * Get the TLB entry for this piece of code.
785 */
786 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
787 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
788 if (pTlbe->uTag == uTag)
789 {
790 /* likely when executing lots of code, otherwise unlikely */
791# ifdef VBOX_WITH_STATISTICS
792 pVCpu->iem.s.CodeTlb.cTlbHits++;
793# endif
794 }
795 else
796 {
797 pVCpu->iem.s.CodeTlb.cTlbMisses++;
798 PGMPTWALK Walk;
799 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
800 if (RT_FAILURE(rc))
801 {
802#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
803 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
804 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
805#endif
806 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
807 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
808 }
809
810 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
811 Assert(Walk.fSucceeded);
812 pTlbe->uTag = uTag;
813 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
814 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
815 pTlbe->GCPhys = Walk.GCPhys;
816 pTlbe->pbMappingR3 = NULL;
817 }
818
819 /*
820 * Check TLB page table level access flags.
821 */
822 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
823 {
824 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
825 {
826 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
827 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
828 }
829 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
830 {
831 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
832 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
833 }
834 }
835
836 /*
837 * Look up the physical page info if necessary.
838 */
839 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
840 { /* not necessary */ }
841 else
842 {
843 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
844 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
845 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
846 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
847 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
848 { /* likely */ }
849 else
850 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
851 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
852 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
853 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
854 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
855 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
856 }
857
858# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
859 /*
860 * Try do a direct read using the pbMappingR3 pointer.
861 */
862 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
863 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
864 {
865 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
866 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
867 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
868 {
869 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
870 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
871 }
872 else
873 {
874 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
875 Assert(cbInstr < cbMaxRead);
876 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
877 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
878 }
879 if (cbDst <= cbMaxRead)
880 {
881 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
882 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
883 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
884 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
885 return;
886 }
887 pVCpu->iem.s.pbInstrBuf = NULL;
888
889 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
890 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
891 }
892 else
893# endif
894#if 0
895 /*
896 * If there is no special read handling, we can read a bit more and
897 * put it in the prefetch buffer.
898 */
899 if ( cbDst < cbMaxRead
900 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
901 {
902 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
903 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
904 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
905 { /* likely */ }
906 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
907 {
908 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
909 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
910 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
911 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
912 }
913 else
914 {
915 Log((RT_SUCCESS(rcStrict)
916 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
917 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
918 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
919 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
920 }
921 }
922 /*
923 * Special read handling, so only read exactly what's needed.
924 * This is a highly unlikely scenario.
925 */
926 else
927#endif
928 {
929 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
930 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
931 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
932 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
933 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
934 { /* likely */ }
935 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
936 {
937 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
938 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
939 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
940 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
941 }
942 else
943 {
944 Log((RT_SUCCESS(rcStrict)
945 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
946 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
947 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
948 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
949 }
950 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
951 if (cbToRead == cbDst)
952 return;
953 }
954
955 /*
956 * More to read, loop.
957 */
958 cbDst -= cbMaxRead;
959 pvDst = (uint8_t *)pvDst + cbMaxRead;
960 }
961#else
962 RT_NOREF(pvDst, cbDst);
963 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
964#endif
965}
966
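/*
 * Editor's note: an illustrative sketch (not part of the original file) of the
 * chunked fetch pattern iemOpcodeFetchBytesJmp uses above: each loop iteration
 * satisfies at most the bytes left up to the current page boundary and the
 * remainder is carried into the next iteration.  The example helpers and the
 * flat source buffer stand in for the TLB lookup and physical-memory read.
 */
#if 0 /* example only */
# include <iprt/types.h>
# include <iprt/string.h>

static size_t exampleReadChunk(uint8_t *pbDst, size_t cbDst, uint64_t *poffSrc, uint8_t const *pbSrc)
{
    size_t cbChunk = 0x1000 - (*poffSrc & 0xfff);   /* bytes left on the current 4K "page" */
    if (cbChunk > cbDst)
        cbChunk = cbDst;
    memcpy(pbDst, &pbSrc[*poffSrc], cbChunk);
    *poffSrc += cbChunk;
    return cbChunk;
}

static void exampleFetchBytes(uint8_t *pbDst, size_t cbDst, uint64_t *poffSrc, uint8_t const *pbSrc)
{
    while (cbDst > 0)                               /* loop until the request is satisfied */
    {
        size_t const cbChunk = exampleReadChunk(pbDst, cbDst, poffSrc, pbSrc);
        cbDst -= cbChunk;
        pbDst += cbChunk;
    }
}
#endif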
967#else
968
969/**
970 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
971 * exception if it fails.
972 *
973 * @returns Strict VBox status code.
974 * @param pVCpu The cross context virtual CPU structure of the
975 * calling thread.
976 * @param cbMin The minimum number of bytes relative to offOpcode
977 * that must be read.
978 */
979VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
980{
981 /*
982 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
983 *
984 * First translate CS:rIP to a physical address.
985 */
986 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
987 uint32_t cbToTryRead;
988 RTGCPTR GCPtrNext;
989 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
990 {
991 cbToTryRead = GUEST_PAGE_SIZE;
992 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
993 if (!IEM_IS_CANONICAL(GCPtrNext))
994 return iemRaiseGeneralProtectionFault0(pVCpu);
995 }
996 else
997 {
998 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
999 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1000 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1001 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1002 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1003 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1004 if (!cbToTryRead) /* overflowed */
1005 {
1006 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1007 cbToTryRead = UINT32_MAX;
1008 /** @todo check out wrapping around the code segment. */
1009 }
1010 if (cbToTryRead < cbMin - cbLeft)
1011 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1012 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1013 }
1014
1015 /* Only read up to the end of the page, and make sure we don't read more
1016 than the opcode buffer can hold. */
1017 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1018 if (cbToTryRead > cbLeftOnPage)
1019 cbToTryRead = cbLeftOnPage;
1020 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1021 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1022/** @todo r=bird: Convert assertion into undefined opcode exception? */
1023 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1024
1025 PGMPTWALK Walk;
1026 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1027 if (RT_FAILURE(rc))
1028 {
1029 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1030#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1031 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1032 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1033#endif
1034 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1035 }
1036 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1037 {
1038 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1039#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1040 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1041 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1042#endif
1043 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1044 }
1045 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1046 {
1047 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1048#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1049 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1050 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1051#endif
1052 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1053 }
1054 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1055 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1056 /** @todo Check reserved bits and such stuff. PGM is better at doing
1057 * that, so do it when implementing the guest virtual address
1058 * TLB... */
1059
1060 /*
1061 * Read the bytes at this address.
1062 *
1063 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1064 * and since PATM should only patch the start of an instruction there
1065 * should be no need to check again here.
1066 */
1067 if (!pVCpu->iem.s.fBypassHandlers)
1068 {
1069 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1070 cbToTryRead, PGMACCESSORIGIN_IEM);
1071 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1072 { /* likely */ }
1073 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1074 {
1075 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1076 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1077 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1078 }
1079 else
1080 {
1081 Log((RT_SUCCESS(rcStrict)
1082 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1083 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1084 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1085 return rcStrict;
1086 }
1087 }
1088 else
1089 {
1090 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1091 if (RT_SUCCESS(rc))
1092 { /* likely */ }
1093 else
1094 {
1095 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1096 return rc;
1097 }
1098 }
1099 pVCpu->iem.s.cbOpcode += cbToTryRead;
1100 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1101
1102 return VINF_SUCCESS;
1103}
1104
1105#endif /* !IEM_WITH_CODE_TLB */
1106#ifndef IEM_WITH_SETJMP
1107
1108/**
1109 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1110 *
1111 * @returns Strict VBox status code.
1112 * @param pVCpu The cross context virtual CPU structure of the
1113 * calling thread.
1114 * @param pb Where to return the opcode byte.
1115 */
1116VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1117{
1118 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1119 if (rcStrict == VINF_SUCCESS)
1120 {
1121 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1122 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1123 pVCpu->iem.s.offOpcode = offOpcode + 1;
1124 }
1125 else
1126 *pb = 0;
1127 return rcStrict;
1128}
1129
1130#else /* IEM_WITH_SETJMP */
1131
1132/**
1133 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1134 *
1135 * @returns The opcode byte.
1136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1137 */
1138uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1139{
1140# ifdef IEM_WITH_CODE_TLB
1141 uint8_t u8;
1142 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1143 return u8;
1144# else
1145 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1146 if (rcStrict == VINF_SUCCESS)
1147 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1148 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1149# endif
1150}
1151
1152#endif /* IEM_WITH_SETJMP */
1153
1154#ifndef IEM_WITH_SETJMP
1155
1156/**
1157 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1158 *
1159 * @returns Strict VBox status code.
1160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1161 * @param pu16 Where to return the sign-extended opcode word.
1162 */
1163VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1164{
1165 uint8_t u8;
1166 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1167 if (rcStrict == VINF_SUCCESS)
1168 *pu16 = (int8_t)u8;
1169 return rcStrict;
1170}
1171
1172
1173/**
1174 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1175 *
1176 * @returns Strict VBox status code.
1177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1178 * @param pu32 Where to return the opcode dword.
1179 */
1180VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1181{
1182 uint8_t u8;
1183 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1184 if (rcStrict == VINF_SUCCESS)
1185 *pu32 = (int8_t)u8;
1186 return rcStrict;
1187}
1188
1189
1190/**
1191 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1192 *
1193 * @returns Strict VBox status code.
1194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1195 * @param pu64 Where to return the opcode qword.
1196 */
1197VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1198{
1199 uint8_t u8;
1200 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1201 if (rcStrict == VINF_SUCCESS)
1202 *pu64 = (int8_t)u8;
1203 return rcStrict;
1204}
1205
1206#endif /* !IEM_WITH_SETJMP */
1207
1208
1209#ifndef IEM_WITH_SETJMP
1210
1211/**
1212 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1213 *
1214 * @returns Strict VBox status code.
1215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1216 * @param pu16 Where to return the opcode word.
1217 */
1218VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1219{
1220 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1221 if (rcStrict == VINF_SUCCESS)
1222 {
1223 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1224# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1225 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1226# else
1227 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1228# endif
1229 pVCpu->iem.s.offOpcode = offOpcode + 2;
1230 }
1231 else
1232 *pu16 = 0;
1233 return rcStrict;
1234}
1235
1236#else /* IEM_WITH_SETJMP */
1237
1238/**
1239 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1240 *
1241 * @returns The opcode word.
1242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1243 */
1244uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1245{
1246# ifdef IEM_WITH_CODE_TLB
1247 uint16_t u16;
1248 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1249 return u16;
1250# else
1251 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1252 if (rcStrict == VINF_SUCCESS)
1253 {
1254 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1255 pVCpu->iem.s.offOpcode += 2;
1256# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1257 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1258# else
1259 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1260# endif
1261 }
1262 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1263# endif
1264}
1265
1266#endif /* IEM_WITH_SETJMP */
1267
1268#ifndef IEM_WITH_SETJMP
1269
1270/**
1271 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1272 *
1273 * @returns Strict VBox status code.
1274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1275 * @param pu32 Where to return the opcode double word.
1276 */
1277VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1278{
1279 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1280 if (rcStrict == VINF_SUCCESS)
1281 {
1282 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1283 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1284 pVCpu->iem.s.offOpcode = offOpcode + 2;
1285 }
1286 else
1287 *pu32 = 0;
1288 return rcStrict;
1289}
1290
1291
1292/**
1293 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1294 *
1295 * @returns Strict VBox status code.
1296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1297 * @param pu64 Where to return the opcode quad word.
1298 */
1299VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1300{
1301 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1302 if (rcStrict == VINF_SUCCESS)
1303 {
1304 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1305 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1306 pVCpu->iem.s.offOpcode = offOpcode + 2;
1307 }
1308 else
1309 *pu64 = 0;
1310 return rcStrict;
1311}
1312
1313#endif /* !IEM_WITH_SETJMP */
1314
1315#ifndef IEM_WITH_SETJMP
1316
1317/**
1318 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1319 *
1320 * @returns Strict VBox status code.
1321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1322 * @param pu32 Where to return the opcode dword.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1325{
1326 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1327 if (rcStrict == VINF_SUCCESS)
1328 {
1329 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1330# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1331 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1332# else
1333 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1334 pVCpu->iem.s.abOpcode[offOpcode + 1],
1335 pVCpu->iem.s.abOpcode[offOpcode + 2],
1336 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1337# endif
1338 pVCpu->iem.s.offOpcode = offOpcode + 4;
1339 }
1340 else
1341 *pu32 = 0;
1342 return rcStrict;
1343}
1344
1345#else /* IEM_WITH_SETJMP */
1346
1347/**
1348 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1349 *
1350 * @returns The opcode dword.
1351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1352 */
1353uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1354{
1355# ifdef IEM_WITH_CODE_TLB
1356 uint32_t u32;
1357 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1358 return u32;
1359# else
1360 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1361 if (rcStrict == VINF_SUCCESS)
1362 {
1363 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1364 pVCpu->iem.s.offOpcode = offOpcode + 4;
1365# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1366 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1367# else
1368 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1369 pVCpu->iem.s.abOpcode[offOpcode + 1],
1370 pVCpu->iem.s.abOpcode[offOpcode + 2],
1371 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1372# endif
1373 }
1374 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1375# endif
1376}
1377
1378#endif /* IEM_WITH_SETJMP */
1379
1380#ifndef IEM_WITH_SETJMP
1381
1382/**
1383 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1384 *
1385 * @returns Strict VBox status code.
1386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1387 * @param pu64 Where to return the opcode dword zero-extended to a qword.
1388 */
1389VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1390{
1391 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1392 if (rcStrict == VINF_SUCCESS)
1393 {
1394 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1395 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1396 pVCpu->iem.s.abOpcode[offOpcode + 1],
1397 pVCpu->iem.s.abOpcode[offOpcode + 2],
1398 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1399 pVCpu->iem.s.offOpcode = offOpcode + 4;
1400 }
1401 else
1402 *pu64 = 0;
1403 return rcStrict;
1404}
1405
1406
1407/**
1408 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1409 *
1410 * @returns Strict VBox status code.
1411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1412 * @param pu64 Where to return the opcode qword.
1413 */
1414VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1415{
1416 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1417 if (rcStrict == VINF_SUCCESS)
1418 {
1419 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1420 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1421 pVCpu->iem.s.abOpcode[offOpcode + 1],
1422 pVCpu->iem.s.abOpcode[offOpcode + 2],
1423 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1424 pVCpu->iem.s.offOpcode = offOpcode + 4;
1425 }
1426 else
1427 *pu64 = 0;
1428 return rcStrict;
1429}
1430
1431#endif /* !IEM_WITH_SETJMP */
1432
1433#ifndef IEM_WITH_SETJMP
1434
1435/**
1436 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1437 *
1438 * @returns Strict VBox status code.
1439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1440 * @param pu64 Where to return the opcode qword.
1441 */
1442VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1443{
1444 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1445 if (rcStrict == VINF_SUCCESS)
1446 {
1447 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1448# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1449 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1450# else
1451 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1452 pVCpu->iem.s.abOpcode[offOpcode + 1],
1453 pVCpu->iem.s.abOpcode[offOpcode + 2],
1454 pVCpu->iem.s.abOpcode[offOpcode + 3],
1455 pVCpu->iem.s.abOpcode[offOpcode + 4],
1456 pVCpu->iem.s.abOpcode[offOpcode + 5],
1457 pVCpu->iem.s.abOpcode[offOpcode + 6],
1458 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1459# endif
1460 pVCpu->iem.s.offOpcode = offOpcode + 8;
1461 }
1462 else
1463 *pu64 = 0;
1464 return rcStrict;
1465}
1466
1467#else /* IEM_WITH_SETJMP */
1468
1469/**
1470 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1471 *
1472 * @returns The opcode qword.
1473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1474 */
1475uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1476{
1477# ifdef IEM_WITH_CODE_TLB
1478 uint64_t u64;
1479 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1480 return u64;
1481# else
1482 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1483 if (rcStrict == VINF_SUCCESS)
1484 {
1485 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1486 pVCpu->iem.s.offOpcode = offOpcode + 8;
1487# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1488 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1489# else
1490 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1491 pVCpu->iem.s.abOpcode[offOpcode + 1],
1492 pVCpu->iem.s.abOpcode[offOpcode + 2],
1493 pVCpu->iem.s.abOpcode[offOpcode + 3],
1494 pVCpu->iem.s.abOpcode[offOpcode + 4],
1495 pVCpu->iem.s.abOpcode[offOpcode + 5],
1496 pVCpu->iem.s.abOpcode[offOpcode + 6],
1497 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1498# endif
1499 }
1500 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1501# endif
1502}
1503
1504#endif /* IEM_WITH_SETJMP */
1505
1506
1507
1508/** @name Misc Worker Functions.
1509 * @{
1510 */
1511
1512/**
1513 * Gets the exception class for the specified exception vector.
1514 *
1515 * @returns The class of the specified exception.
1516 * @param uVector The exception vector.
1517 */
1518static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1519{
1520 Assert(uVector <= X86_XCPT_LAST);
1521 switch (uVector)
1522 {
1523 case X86_XCPT_DE:
1524 case X86_XCPT_TS:
1525 case X86_XCPT_NP:
1526 case X86_XCPT_SS:
1527 case X86_XCPT_GP:
1528 case X86_XCPT_SX: /* AMD only */
1529 return IEMXCPTCLASS_CONTRIBUTORY;
1530
1531 case X86_XCPT_PF:
1532 case X86_XCPT_VE: /* Intel only */
1533 return IEMXCPTCLASS_PAGE_FAULT;
1534
1535 case X86_XCPT_DF:
1536 return IEMXCPTCLASS_DOUBLE_FAULT;
1537 }
1538 return IEMXCPTCLASS_BENIGN;
1539}
1540
1541
1542/**
1543 * Evaluates how to handle an exception caused during delivery of another event
1544 * (exception / interrupt).
1545 *
1546 * @returns How to handle the recursive exception.
1547 * @param pVCpu The cross context virtual CPU structure of the
1548 * calling thread.
1549 * @param fPrevFlags The flags of the previous event.
1550 * @param uPrevVector The vector of the previous event.
1551 * @param fCurFlags The flags of the current exception.
1552 * @param uCurVector The vector of the current exception.
1553 * @param pfXcptRaiseInfo Where to store additional information about the
1554 * exception condition. Optional.
1555 */
1556VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1557 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1558{
1559 /*
1560 * Only CPU exceptions can be raised while delivering other events; software interrupt
1561 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1562 */
1563 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1564 Assert(pVCpu); RT_NOREF(pVCpu);
1565 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1566
1567 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1568 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1569 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1570 {
1571 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1572 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1573 {
1574 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1575 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1576 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1577 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1578 {
1579 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1580 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1581 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1582 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1583 uCurVector, pVCpu->cpum.GstCtx.cr2));
1584 }
1585 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1586 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1587 {
1588 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1589 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1590 }
1591 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1592 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1593 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1594 {
1595 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1596 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1597 }
1598 }
1599 else
1600 {
1601 if (uPrevVector == X86_XCPT_NMI)
1602 {
1603 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1604 if (uCurVector == X86_XCPT_PF)
1605 {
1606 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1607 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1608 }
1609 }
1610 else if ( uPrevVector == X86_XCPT_AC
1611 && uCurVector == X86_XCPT_AC)
1612 {
1613 enmRaise = IEMXCPTRAISE_CPU_HANG;
1614 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1615 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1616 }
1617 }
1618 }
1619 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1620 {
1621 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1622 if (uCurVector == X86_XCPT_PF)
1623 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1624 }
1625 else
1626 {
1627 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1628 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1629 }
1630
1631 if (pfXcptRaiseInfo)
1632 *pfXcptRaiseInfo = fRaiseInfo;
1633 return enmRaise;
1634}
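/** A minimal standalone sketch (not IEM code) of the class-combination rules implemented by
 * iemGetXcptClass() and IEMEvaluateRecursiveXcpt() above: page-fault followed by page-fault or
 * contributory, and contributory followed by contributory, escalate to \#DF; anything of those
 * classes raised while delivering \#DF escalates to a triple fault; benign combinations simply
 * deliver the current exception.  The enum and helper names are illustrative only.
 * @code
 *  typedef enum { SK_BENIGN, SK_CONTRIBUTORY, SK_PAGE_FAULT, SK_DOUBLE_FAULT } SKXCPTCLASS;
 *  typedef enum { SK_RAISE_CURRENT, SK_RAISE_DF, SK_RAISE_TRIPLE } SKXCPTRAISE;
 *
 *  static SKXCPTRAISE sketchCombineXcpt(SKXCPTCLASS enmPrev, SKXCPTCLASS enmCur)
 *  {
 *      if (enmPrev == SK_DOUBLE_FAULT && (enmCur == SK_CONTRIBUTORY || enmCur == SK_PAGE_FAULT))
 *          return SK_RAISE_TRIPLE;
 *      if (   (enmPrev == SK_PAGE_FAULT   && (enmCur == SK_PAGE_FAULT || enmCur == SK_CONTRIBUTORY))
 *          || (enmPrev == SK_CONTRIBUTORY &&  enmCur == SK_CONTRIBUTORY))
 *          return SK_RAISE_DF;
 *      return SK_RAISE_CURRENT;
 *  }
 * @endcode
 */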
1635
1636
1637/**
1638 * Enters the CPU shutdown state initiated by a triple fault or other
1639 * unrecoverable conditions.
1640 *
1641 * @returns Strict VBox status code.
1642 * @param pVCpu The cross context virtual CPU structure of the
1643 * calling thread.
1644 */
1645static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1646{
1647 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1648 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1649
1650 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1651 {
1652 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1653 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1654 }
1655
1656 RT_NOREF(pVCpu);
1657 return VINF_EM_TRIPLE_FAULT;
1658}
1659
1660
1661/**
1662 * Validates a new SS segment.
1663 *
1664 * @returns VBox strict status code.
1665 * @param pVCpu The cross context virtual CPU structure of the
1666 * calling thread.
1667 * @param NewSS The new SS selector.
1668 * @param uCpl The CPL to load the stack for.
1669 * @param pDesc Where to return the descriptor.
1670 */
1671static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1672{
1673 /* Null selectors are not allowed (we're not called for dispatching
1674 interrupts with SS=0 in long mode). */
1675 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1676 {
1677 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1678 return iemRaiseTaskSwitchFault0(pVCpu);
1679 }
1680
1681 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1682 if ((NewSS & X86_SEL_RPL) != uCpl)
1683 {
1684 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1685 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1686 }
1687
1688 /*
1689 * Read the descriptor.
1690 */
1691 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1692 if (rcStrict != VINF_SUCCESS)
1693 return rcStrict;
1694
1695 /*
1696 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1697 */
1698 if (!pDesc->Legacy.Gen.u1DescType)
1699 {
1700 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1701 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1702 }
1703
1704 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1705 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1706 {
1707 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1708 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1709 }
1710 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1711 {
1712 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1713 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1714 }
1715
1716 /* Is it there? */
1717 /** @todo testcase: Is this checked before the canonical / limit check below? */
1718 if (!pDesc->Legacy.Gen.u1Present)
1719 {
1720 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1721 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1722 }
1723
1724 return VINF_SUCCESS;
1725}
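/** A minimal standalone sketch (not IEM code) of the SS suitability test performed above on
 * the descriptor type nibble: SS must be a writable data segment (code bit clear, write bit
 * set) with a DPL equal to the CPL being loaded.  The bit values mirror the x86 descriptor
 * layout; the macro and helper names are illustrative only.
 * @code
 *  #include <stdint.h>
 *  #include <stdbool.h>
 *
 *  #define SK_SEL_TYPE_CODE   0x8     // bit 3 of the type nibble: code segment
 *  #define SK_SEL_TYPE_WRITE  0x2     // bit 1 of the type nibble: writable (data segments)
 *
 *  static bool sketchIsSsUsable(uint8_t u4Type, uint8_t u2Dpl, uint8_t uCpl)
 *  {
 *      if (u4Type & SK_SEL_TYPE_CODE)      // code segments can never be loaded into SS
 *          return false;
 *      if (!(u4Type & SK_SEL_TYPE_WRITE))  // read-only data segments are rejected too
 *          return false;
 *      return u2Dpl == uCpl;               // stack segment DPL must equal the CPL
 *  }
 * @endcode
 */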
1726
1727/** @} */
1728
1729
1730/** @name Raising Exceptions.
1731 *
1732 * @{
1733 */
1734
1735
1736/**
1737 * Loads the specified stack far pointer from the TSS.
1738 *
1739 * @returns VBox strict status code.
1740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1741 * @param uCpl The CPL to load the stack for.
1742 * @param pSelSS Where to return the new stack segment.
1743 * @param puEsp Where to return the new stack pointer.
1744 */
1745static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1746{
1747 VBOXSTRICTRC rcStrict;
1748 Assert(uCpl < 4);
1749
1750 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1751 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1752 {
1753 /*
1754 * 16-bit TSS (X86TSS16).
1755 */
1756 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1757 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1758 {
1759 uint32_t off = uCpl * 4 + 2;
1760 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1761 {
1762 /** @todo check actual access pattern here. */
1763 uint32_t u32Tmp = 0; /* gcc maybe... */
1764 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1765 if (rcStrict == VINF_SUCCESS)
1766 {
1767 *puEsp = RT_LOWORD(u32Tmp);
1768 *pSelSS = RT_HIWORD(u32Tmp);
1769 return VINF_SUCCESS;
1770 }
1771 }
1772 else
1773 {
1774 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1775 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1776 }
1777 break;
1778 }
1779
1780 /*
1781 * 32-bit TSS (X86TSS32).
1782 */
1783 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1784 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1785 {
1786 uint32_t off = uCpl * 8 + 4;
1787 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1788 {
1789/** @todo check actual access pattern here. */
1790 uint64_t u64Tmp;
1791 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1792 if (rcStrict == VINF_SUCCESS)
1793 {
1794 *puEsp = u64Tmp & UINT32_MAX;
1795 *pSelSS = (RTSEL)(u64Tmp >> 32);
1796 return VINF_SUCCESS;
1797 }
1798 }
1799 else
1800 {
1801 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1802 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1803 }
1804 break;
1805 }
1806
1807 default:
1808 AssertFailed();
1809 rcStrict = VERR_IEM_IPE_4;
1810 break;
1811 }
1812
1813 *puEsp = 0; /* make gcc happy */
1814 *pSelSS = 0; /* make gcc happy */
1815 return rcStrict;
1816}
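/** A minimal standalone sketch (not IEM code) of the TSS offset arithmetic used above: the
 * 16-bit TSS stores {SP, SS} pairs of 4 bytes starting at offset 2, while the 32-bit TSS
 * stores {ESP, SS} pairs of 8 bytes starting at offset 4, both indexed by the privilege level
 * being entered.  The helper name is illustrative only.
 * @code
 *  #include <stdint.h>
 *  #include <stdbool.h>
 *
 *  static uint32_t sketchTssStackEntryOffset(bool f386Tss, uint8_t uCpl)
 *  {
 *      return f386Tss ? uCpl * 8u + 4u   // X86TSS32: esp0 at 0x04, ss0 at 0x08, ...
 *                     : uCpl * 4u + 2u;  // X86TSS16: sp0  at 0x02, ss0 at 0x04, ...
 *  }
 * @endcode
 */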
1817
1818
1819/**
1820 * Loads the specified stack pointer from the 64-bit TSS.
1821 *
1822 * @returns VBox strict status code.
1823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1824 * @param uCpl The CPL to load the stack for.
1825 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1826 * @param puRsp Where to return the new stack pointer.
1827 */
1828static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1829{
1830 Assert(uCpl < 4);
1831 Assert(uIst < 8);
1832 *puRsp = 0; /* make gcc happy */
1833
1834 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1835 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1836
1837 uint32_t off;
1838 if (uIst)
1839 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1840 else
1841 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1842 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1843 {
1844 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1845 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1846 }
1847
1848 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1849}
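/** A minimal standalone sketch (not IEM code) of the 64-bit TSS offset arithmetic used above:
 * RSP0..RSP2 are 8-byte fields starting at offset 0x04 and IST1..IST7 are 8-byte fields
 * starting at offset 0x24; a non-zero IST index selects an IST slot, otherwise the CPL picks
 * the RSPn slot.  The helper name is illustrative only.
 * @code
 *  #include <stdint.h>
 *
 *  static uint32_t sketchTss64StackEntryOffset(uint8_t uCpl, uint8_t uIst)
 *  {
 *      if (uIst)                                       // uIst is 1..7 when used
 *          return (uint32_t)(uIst - 1) * 8u + 0x24u;   // IST1 lives at offset 0x24
 *      return (uint32_t)uCpl * 8u + 0x04u;             // RSP0 lives at offset 0x04
 *  }
 * @endcode
 */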
1850
1851
1852/**
1853 * Adjust the CPU state according to the exception being raised.
1854 *
1855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1856 * @param u8Vector The exception that has been raised.
1857 */
1858DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1859{
1860 switch (u8Vector)
1861 {
1862 case X86_XCPT_DB:
1863 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1864 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1865 break;
1866 /** @todo Read the AMD and Intel exception reference... */
1867 }
1868}
1869
1870
1871/**
1872 * Implements exceptions and interrupts for real mode.
1873 *
1874 * @returns VBox strict status code.
1875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1876 * @param cbInstr The number of bytes to offset rIP by in the return
1877 * address.
1878 * @param u8Vector The interrupt / exception vector number.
1879 * @param fFlags The flags.
1880 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1881 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1882 */
1883static VBOXSTRICTRC
1884iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1885 uint8_t cbInstr,
1886 uint8_t u8Vector,
1887 uint32_t fFlags,
1888 uint16_t uErr,
1889 uint64_t uCr2) RT_NOEXCEPT
1890{
1891 NOREF(uErr); NOREF(uCr2);
1892 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1893
1894 /*
1895 * Read the IDT entry.
1896 */
1897 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1898 {
1899 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1900 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1901 }
1902 RTFAR16 Idte;
1903 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1904 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1905 {
1906 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1907 return rcStrict;
1908 }
1909
1910 /*
1911 * Push the stack frame.
1912 */
1913 uint16_t *pu16Frame;
1914 uint64_t uNewRsp;
1915 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
1916 if (rcStrict != VINF_SUCCESS)
1917 return rcStrict;
1918
1919 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1920#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1921 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1922 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1923 fEfl |= UINT16_C(0xf000);
1924#endif
1925 pu16Frame[2] = (uint16_t)fEfl;
1926 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1927 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1928 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1929 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1930 return rcStrict;
1931
1932 /*
1933 * Load the vector address into cs:ip and make exception specific state
1934 * adjustments.
1935 */
1936 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1937 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1938 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1939 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1940 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1941 pVCpu->cpum.GstCtx.rip = Idte.off;
1942 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1943 IEMMISC_SET_EFL(pVCpu, fEfl);
1944
1945 /** @todo do we actually do this in real mode? */
1946 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1947 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1948
1949 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1950}
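/** A minimal standalone sketch (not IEM code) of the real-mode vectoring done above: the IVT
 * entry for a vector is 4 bytes at linear address IDTR.base + 4 * vector, holding the handler
 * offset in the low word and the segment in the high word, and the CPU pushes FLAGS, CS and
 * IP (in that order) before jumping there.  The struct and helper names are illustrative only.
 * @code
 *  #include <stdint.h>
 *
 *  typedef struct SKIVTENTRY
 *  {
 *      uint16_t offHandler;   // new IP
 *      uint16_t selHandler;   // new CS
 *  } SKIVTENTRY;
 *
 *  static uint64_t sketchRealModeIvtEntryAddr(uint64_t uIdtrBase, uint8_t bVector)
 *  {
 *      return uIdtrBase + 4u * bVector;   // 4 bytes per entry, 256 entries at most
 *  }
 * @endcode
 */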
1951
1952
1953/**
1954 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1955 *
1956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1957 * @param pSReg Pointer to the segment register.
1958 */
1959DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1960{
1961 pSReg->Sel = 0;
1962 pSReg->ValidSel = 0;
1963 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1964 {
1965 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
1966 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1967 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1968 }
1969 else
1970 {
1971 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1972 /** @todo check this on AMD-V */
1973 pSReg->u64Base = 0;
1974 pSReg->u32Limit = 0;
1975 }
1976}
1977
1978
1979/**
1980 * Loads a segment selector during a task switch in V8086 mode.
1981 *
1982 * @param pSReg Pointer to the segment register.
1983 * @param uSel The selector value to load.
1984 */
1985DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1986{
1987 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1988 pSReg->Sel = uSel;
1989 pSReg->ValidSel = uSel;
1990 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1991 pSReg->u64Base = uSel << 4;
1992 pSReg->u32Limit = 0xffff;
1993 pSReg->Attr.u = 0xf3;
1994}
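/** A minimal standalone sketch (not IEM code) of the virtual-8086 hidden-register rule applied
 * above: the base is simply the selector shifted left by four, the limit is always 0xffff, and
 * the attributes describe a present, DPL 3, accessed read/write data segment (0xf3).  The
 * helper name is illustrative only.
 * @code
 *  #include <stdint.h>
 *
 *  static uint32_t sketchV86SegBase(uint16_t uSel)
 *  {
 *      return (uint32_t)uSel << 4;   // real/V86 mode: base = selector * 16
 *  }
 * @endcode
 */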
1995
1996
1997/**
1998 * Loads a segment selector during a task switch in protected mode.
1999 *
2000 * In this task switch scenario, we would throw \#TS exceptions rather than
2001 * \#GPs.
2002 *
2003 * @returns VBox strict status code.
2004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2005 * @param pSReg Pointer to the segment register.
2006 * @param uSel The new selector value.
2007 *
2008 * @remarks This does _not_ handle CS or SS.
2009 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2010 */
2011static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2012{
2013 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2014
2015 /* Null data selector. */
2016 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2017 {
2018 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2019 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2020 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2021 return VINF_SUCCESS;
2022 }
2023
2024 /* Fetch the descriptor. */
2025 IEMSELDESC Desc;
2026 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2027 if (rcStrict != VINF_SUCCESS)
2028 {
2029 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2030 VBOXSTRICTRC_VAL(rcStrict)));
2031 return rcStrict;
2032 }
2033
2034 /* Must be a data segment or readable code segment. */
2035 if ( !Desc.Legacy.Gen.u1DescType
2036 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2037 {
2038 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2039 Desc.Legacy.Gen.u4Type));
2040 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2041 }
2042
2043 /* Check privileges for data segments and non-conforming code segments. */
2044 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2045 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2046 {
2047 /* The RPL and the new CPL must be less than or equal to the DPL. */
2048 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2049 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2050 {
2051 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2052 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2053 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2054 }
2055 }
2056
2057 /* Is it there? */
2058 if (!Desc.Legacy.Gen.u1Present)
2059 {
2060 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2061 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2062 }
2063
2064 /* The base and limit. */
2065 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2066 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2067
2068 /*
2069 * Ok, everything checked out fine. Now set the accessed bit before
2070 * committing the result into the registers.
2071 */
2072 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2073 {
2074 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2075 if (rcStrict != VINF_SUCCESS)
2076 return rcStrict;
2077 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2078 }
2079
2080 /* Commit */
2081 pSReg->Sel = uSel;
2082 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2083 pSReg->u32Limit = cbLimit;
2084 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2085 pSReg->ValidSel = uSel;
2086 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2087 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2088 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2089
2090 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2091 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2092 return VINF_SUCCESS;
2093}
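/** A minimal standalone sketch (not IEM code) of the privilege rule enforced above for data
 * and non-conforming code segments: both the selector RPL and the current CPL must be
 * numerically less than or equal to the descriptor DPL, otherwise a fault is raised (\#TS in
 * the task-switch path, \#GP elsewhere).  The helper name is illustrative only.
 * @code
 *  #include <stdint.h>
 *  #include <stdbool.h>
 *
 *  static bool sketchDataSegPrivOk(uint8_t uRpl, uint8_t uCpl, uint8_t uDpl)
 *  {
 *      return uRpl <= uDpl && uCpl <= uDpl;   // equivalent to max(RPL, CPL) <= DPL
 *  }
 * @endcode
 */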
2094
2095
2096/**
2097 * Performs a task switch.
2098 *
2099 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2100 * caller is responsible for performing the necessary checks (like DPL, TSS
2101 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2102 * reference for JMP, CALL, IRET.
2103 *
2104 * If the task switch is due to a software interrupt or hardware exception,
2105 * the caller is responsible for validating the TSS selector and descriptor. See
2106 * Intel Instruction reference for INT n.
2107 *
2108 * @returns VBox strict status code.
2109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2110 * @param enmTaskSwitch The cause of the task switch.
2111 * @param uNextEip The EIP effective after the task switch.
2112 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2113 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2114 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2115 * @param SelTSS The TSS selector of the new task.
2116 * @param pNewDescTSS Pointer to the new TSS descriptor.
2117 */
2118VBOXSTRICTRC
2119iemTaskSwitch(PVMCPUCC pVCpu,
2120 IEMTASKSWITCH enmTaskSwitch,
2121 uint32_t uNextEip,
2122 uint32_t fFlags,
2123 uint16_t uErr,
2124 uint64_t uCr2,
2125 RTSEL SelTSS,
2126 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2127{
2128 Assert(!IEM_IS_REAL_MODE(pVCpu));
2129 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2130 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2131
2132 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2133 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2134 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2135 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2136 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2137
2138 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2139 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2140
2141 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2142 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2143
2144 /* Update CR2 in case it's a page-fault. */
2145 /** @todo This should probably be done much earlier in IEM/PGM. See
2146 * @bugref{5653#c49}. */
2147 if (fFlags & IEM_XCPT_FLAGS_CR2)
2148 pVCpu->cpum.GstCtx.cr2 = uCr2;
2149
2150 /*
2151 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2152 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2153 */
2154 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2155 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2156 if (uNewTSSLimit < uNewTSSLimitMin)
2157 {
2158 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2159 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2160 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2161 }
2162
2163 /*
2164 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2165 * The new TSS must have been read and validated (DPL, limits etc.) before a
2166 * task-switch VM-exit commences.
2167 *
2168 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2169 */
2170 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2171 {
2172 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2173 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2174 }
2175
2176 /*
2177 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2178 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2179 */
2180 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2181 {
2182 uint32_t const uExitInfo1 = SelTSS;
2183 uint32_t uExitInfo2 = uErr;
2184 switch (enmTaskSwitch)
2185 {
2186 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2187 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2188 default: break;
2189 }
2190 if (fFlags & IEM_XCPT_FLAGS_ERR)
2191 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2192 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2193 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2194
2195 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2196 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2197 RT_NOREF2(uExitInfo1, uExitInfo2);
2198 }
2199
2200 /*
2201 * Check the current TSS limit. The last written byte to the current TSS during the
2202 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2203 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2204 *
2205 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2206 * end up with smaller than "legal" TSS limits.
2207 */
2208 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2209 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2210 if (uCurTSSLimit < uCurTSSLimitMin)
2211 {
2212 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2213 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2214 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2215 }
2216
2217 /*
2218 * Verify that the new TSS can be accessed and map it. Map only the required contents
2219 * and not the entire TSS.
2220 */
2221 void *pvNewTSS;
2222 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2223 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2224 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2225 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2226 * not perform correct translation if this happens. See Intel spec. 7.2.1
2227 * "Task-State Segment". */
2228 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2229 if (rcStrict != VINF_SUCCESS)
2230 {
2231 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2232 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2233 return rcStrict;
2234 }
2235
2236 /*
2237 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2238 */
2239 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2240 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2241 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2242 {
2243 PX86DESC pDescCurTSS;
2244 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2245 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2246 if (rcStrict != VINF_SUCCESS)
2247 {
2248 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2249 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2250 return rcStrict;
2251 }
2252
2253 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2254 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2255 if (rcStrict != VINF_SUCCESS)
2256 {
2257 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2258 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2259 return rcStrict;
2260 }
2261
2262 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2263 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2264 {
2265 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2266 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2267 u32EFlags &= ~X86_EFL_NT;
2268 }
2269 }
2270
2271 /*
2272 * Save the CPU state into the current TSS.
2273 */
2274 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2275 if (GCPtrNewTSS == GCPtrCurTSS)
2276 {
2277 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2278 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2279 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2280 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2281 pVCpu->cpum.GstCtx.ldtr.Sel));
2282 }
2283 if (fIsNewTSS386)
2284 {
2285 /*
2286 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2287 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2288 */
2289 void *pvCurTSS32;
2290 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2291 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2292 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2293 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2294 if (rcStrict != VINF_SUCCESS)
2295 {
2296 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2297 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2298 return rcStrict;
2299 }
2300
2301 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2302 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2303 pCurTSS32->eip = uNextEip;
2304 pCurTSS32->eflags = u32EFlags;
2305 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2306 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2307 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2308 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2309 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2310 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2311 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2312 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2313 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2314 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2315 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2316 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2317 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2318 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2319
2320 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2321 if (rcStrict != VINF_SUCCESS)
2322 {
2323 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2324 VBOXSTRICTRC_VAL(rcStrict)));
2325 return rcStrict;
2326 }
2327 }
2328 else
2329 {
2330 /*
2331 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2332 */
2333 void *pvCurTSS16;
2334 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2335 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2336 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2337 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2338 if (rcStrict != VINF_SUCCESS)
2339 {
2340 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2341 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2342 return rcStrict;
2343 }
2344
2345 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2346 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2347 pCurTSS16->ip = uNextEip;
2348 pCurTSS16->flags = u32EFlags;
2349 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2350 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2351 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2352 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2353 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2354 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2355 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2356 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2357 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2358 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2359 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2360 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2361
2362 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2363 if (rcStrict != VINF_SUCCESS)
2364 {
2365 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2366 VBOXSTRICTRC_VAL(rcStrict)));
2367 return rcStrict;
2368 }
2369 }
2370
2371 /*
2372 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2373 */
2374 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2375 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2376 {
2377 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2378 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2379 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2380 }
2381
2382 /*
2383 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2384 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2385 */
2386 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2387 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2388 bool fNewDebugTrap;
2389 if (fIsNewTSS386)
2390 {
2391 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2392 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2393 uNewEip = pNewTSS32->eip;
2394 uNewEflags = pNewTSS32->eflags;
2395 uNewEax = pNewTSS32->eax;
2396 uNewEcx = pNewTSS32->ecx;
2397 uNewEdx = pNewTSS32->edx;
2398 uNewEbx = pNewTSS32->ebx;
2399 uNewEsp = pNewTSS32->esp;
2400 uNewEbp = pNewTSS32->ebp;
2401 uNewEsi = pNewTSS32->esi;
2402 uNewEdi = pNewTSS32->edi;
2403 uNewES = pNewTSS32->es;
2404 uNewCS = pNewTSS32->cs;
2405 uNewSS = pNewTSS32->ss;
2406 uNewDS = pNewTSS32->ds;
2407 uNewFS = pNewTSS32->fs;
2408 uNewGS = pNewTSS32->gs;
2409 uNewLdt = pNewTSS32->selLdt;
2410 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2411 }
2412 else
2413 {
2414 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2415 uNewCr3 = 0;
2416 uNewEip = pNewTSS16->ip;
2417 uNewEflags = pNewTSS16->flags;
2418 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2419 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2420 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2421 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2422 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2423 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2424 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2425 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2426 uNewES = pNewTSS16->es;
2427 uNewCS = pNewTSS16->cs;
2428 uNewSS = pNewTSS16->ss;
2429 uNewDS = pNewTSS16->ds;
2430 uNewFS = 0;
2431 uNewGS = 0;
2432 uNewLdt = pNewTSS16->selLdt;
2433 fNewDebugTrap = false;
2434 }
2435
2436 if (GCPtrNewTSS == GCPtrCurTSS)
2437 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2438 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2439
2440 /*
2441 * We're done accessing the new TSS.
2442 */
2443 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2444 if (rcStrict != VINF_SUCCESS)
2445 {
2446 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2447 return rcStrict;
2448 }
2449
2450 /*
2451 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2452 */
2453 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2454 {
2455 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2456 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2457 if (rcStrict != VINF_SUCCESS)
2458 {
2459 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2460 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2461 return rcStrict;
2462 }
2463
2464 /* Check that the descriptor indicates the new TSS is available (not busy). */
2465 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2466 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2467 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2468
2469 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2470 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2471 if (rcStrict != VINF_SUCCESS)
2472 {
2473 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2474 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2475 return rcStrict;
2476 }
2477 }
2478
2479 /*
2480 * From this point on, we're technically in the new task. We will defer exceptions
2481 * until the completion of the task switch but before executing any instructions in the new task.
2482 */
2483 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2484 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2485 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2486 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2487 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2488 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2489 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2490
2491 /* Set the busy bit in TR. */
2492 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2493
2494 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2495 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2496 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2497 {
2498 uNewEflags |= X86_EFL_NT;
2499 }
2500
2501 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2502 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2503 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2504
2505 pVCpu->cpum.GstCtx.eip = uNewEip;
2506 pVCpu->cpum.GstCtx.eax = uNewEax;
2507 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2508 pVCpu->cpum.GstCtx.edx = uNewEdx;
2509 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2510 pVCpu->cpum.GstCtx.esp = uNewEsp;
2511 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2512 pVCpu->cpum.GstCtx.esi = uNewEsi;
2513 pVCpu->cpum.GstCtx.edi = uNewEdi;
2514
2515 uNewEflags &= X86_EFL_LIVE_MASK;
2516 uNewEflags |= X86_EFL_RA1_MASK;
2517 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2518
2519 /*
2520 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2521 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2522 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2523 */
2524 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2525 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2526
2527 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2528 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2529
2530 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2531 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2532
2533 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2534 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2535
2536 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2537 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2538
2539 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2540 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2541 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2542
2543 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2544 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2545 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2546 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2547
2548 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2549 {
2550 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2551 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2552 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2553 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2554 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2555 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2556 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2557 }
2558
2559 /*
2560 * Switch CR3 for the new task.
2561 */
2562 if ( fIsNewTSS386
2563 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2564 {
2565 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2566 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2567 AssertRCSuccessReturn(rc, rc);
2568
2569 /* Inform PGM. */
2570 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2571 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2572 AssertRCReturn(rc, rc);
2573 /* ignore informational status codes */
2574
2575 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2576 }
2577
2578 /*
2579 * Switch LDTR for the new task.
2580 */
2581 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2582 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2583 else
2584 {
2585 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2586
2587 IEMSELDESC DescNewLdt;
2588 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2589 if (rcStrict != VINF_SUCCESS)
2590 {
2591 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2592 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2593 return rcStrict;
2594 }
2595 if ( !DescNewLdt.Legacy.Gen.u1Present
2596 || DescNewLdt.Legacy.Gen.u1DescType
2597 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2598 {
2599 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2600 uNewLdt, DescNewLdt.Legacy.u));
2601 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2602 }
2603
2604 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2605 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2606 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2607 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2608 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2609 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2610 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2611 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2612 }
2613
2614 IEMSELDESC DescSS;
2615 if (IEM_IS_V86_MODE(pVCpu))
2616 {
2617 pVCpu->iem.s.uCpl = 3;
2618 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2619 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2620 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2621 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2622 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2623 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2624
2625 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2626 DescSS.Legacy.u = 0;
2627 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2628 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2629 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2630 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2631 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2632 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2633 DescSS.Legacy.Gen.u2Dpl = 3;
2634 }
2635 else
2636 {
2637 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2638
2639 /*
2640 * Load the stack segment for the new task.
2641 */
2642 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2643 {
2644 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2645 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2646 }
2647
2648 /* Fetch the descriptor. */
2649 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2650 if (rcStrict != VINF_SUCCESS)
2651 {
2652 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2653 VBOXSTRICTRC_VAL(rcStrict)));
2654 return rcStrict;
2655 }
2656
2657 /* SS must be a data segment and writable. */
2658 if ( !DescSS.Legacy.Gen.u1DescType
2659 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2660 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2661 {
2662 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2663 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2664 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2665 }
2666
2667 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2668 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2669 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2670 {
2671 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2672 uNewCpl));
2673 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2674 }
2675
2676 /* Is it there? */
2677 if (!DescSS.Legacy.Gen.u1Present)
2678 {
2679 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2680 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2681 }
2682
2683 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2684 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2685
2686 /* Set the accessed bit before committing the result into SS. */
2687 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2688 {
2689 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2690 if (rcStrict != VINF_SUCCESS)
2691 return rcStrict;
2692 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2693 }
2694
2695 /* Commit SS. */
2696 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2697 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2698 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2699 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2700 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2701 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2702 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2703
2704 /* CPL has changed, update IEM before loading rest of segments. */
2705 pVCpu->iem.s.uCpl = uNewCpl;
2706
2707 /*
2708 * Load the data segments for the new task.
2709 */
2710 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2711 if (rcStrict != VINF_SUCCESS)
2712 return rcStrict;
2713 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2714 if (rcStrict != VINF_SUCCESS)
2715 return rcStrict;
2716 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2717 if (rcStrict != VINF_SUCCESS)
2718 return rcStrict;
2719 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2720 if (rcStrict != VINF_SUCCESS)
2721 return rcStrict;
2722
2723 /*
2724 * Load the code segment for the new task.
2725 */
2726 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2727 {
2728 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2729 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2730 }
2731
2732 /* Fetch the descriptor. */
2733 IEMSELDESC DescCS;
2734 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2735 if (rcStrict != VINF_SUCCESS)
2736 {
2737 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2738 return rcStrict;
2739 }
2740
2741 /* CS must be a code segment. */
2742 if ( !DescCS.Legacy.Gen.u1DescType
2743 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2744 {
2745 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2746 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2747 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2748 }
2749
2750 /* For conforming CS, DPL must be less than or equal to the RPL. */
2751 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2752 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2753 {
2754 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2755 DescCS.Legacy.Gen.u2Dpl));
2756 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2757 }
2758
2759 /* For non-conforming CS, DPL must match RPL. */
2760 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2761 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2762 {
2763 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2764 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2765 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2766 }
2767
2768 /* Is it there? */
2769 if (!DescCS.Legacy.Gen.u1Present)
2770 {
2771 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2772 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2773 }
2774
2775 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2776 u64Base = X86DESC_BASE(&DescCS.Legacy);
2777
2778 /* Set the accessed bit before committing the result into CS. */
2779 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2780 {
2781 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2782 if (rcStrict != VINF_SUCCESS)
2783 return rcStrict;
2784 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2785 }
2786
2787 /* Commit CS. */
2788 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2789 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2790 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2791 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2792 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2793 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2794 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2795 }
2796
2797 /** @todo Debug trap. */
2798 if (fIsNewTSS386 && fNewDebugTrap)
2799 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2800
2801 /*
2802 * Construct the error code masks based on what caused this task switch.
2803 * See Intel Instruction reference for INT.
2804 */
2805 uint16_t uExt;
2806 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2807 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2808 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2809 {
2810 uExt = 1;
2811 }
2812 else
2813 uExt = 0;
2814
2815 /*
2816 * Push any error code on to the new stack.
2817 */
2818 if (fFlags & IEM_XCPT_FLAGS_ERR)
2819 {
2820 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2821 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2822 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2823
2824 /* Check that there is sufficient space on the stack. */
2825 /** @todo Factor out segment limit checking for normal/expand down segments
2826 * into a separate function. */
2827 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2828 {
2829 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2830 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2831 {
2832 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2833 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2834 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2835 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2836 }
2837 }
2838 else
2839 {
2840 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2841 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2842 {
2843 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2844 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2845 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2846 }
2847 }
2848
2849
2850 if (fIsNewTSS386)
2851 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2852 else
2853 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2854 if (rcStrict != VINF_SUCCESS)
2855 {
2856 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2857 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2858 return rcStrict;
2859 }
2860 }
2861
2862 /* Check the new EIP against the new CS limit. */
2863 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2864 {
2865 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2866 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2867 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2868 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2869 }
2870
2871 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2872 pVCpu->cpum.GstCtx.ss.Sel));
2873 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2874}
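/** A minimal standalone sketch (not IEM code) of the busy-bit and NT bookkeeping that the task
 * switch above performs depending on its cause: JMP and IRET clear the busy bit of the
 * outgoing TSS descriptor, every cause except IRET sets the busy bit of the incoming one, and
 * EFLAGS.NT is set in the new context for CALL/INT_XCPT while being cleared in the saved
 * EFLAGS image on IRET.  The enum and helper names are illustrative only.
 * @code
 *  #include <stdbool.h>
 *
 *  typedef enum { SK_TS_JUMP, SK_TS_CALL, SK_TS_IRET, SK_TS_INT_XCPT } SKTASKSWITCH;
 *
 *  static bool sketchClearOldTssBusy(SKTASKSWITCH enmCause)  { return enmCause == SK_TS_JUMP || enmCause == SK_TS_IRET; }
 *  static bool sketchSetNewTssBusy(SKTASKSWITCH enmCause)    { return enmCause != SK_TS_IRET; }
 *  static bool sketchSetNtInNewEflags(SKTASKSWITCH enmCause) { return enmCause == SK_TS_CALL || enmCause == SK_TS_INT_XCPT; }
 * @endcode
 */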
2875
2876
2877/**
2878 * Implements exceptions and interrupts for protected mode.
2879 *
2880 * @returns VBox strict status code.
2881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2882 * @param cbInstr The number of bytes to offset rIP by in the return
2883 * address.
2884 * @param u8Vector The interrupt / exception vector number.
2885 * @param fFlags The flags.
2886 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2887 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2888 */
2889static VBOXSTRICTRC
2890iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2891 uint8_t cbInstr,
2892 uint8_t u8Vector,
2893 uint32_t fFlags,
2894 uint16_t uErr,
2895 uint64_t uCr2) RT_NOEXCEPT
2896{
2897 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2898
2899 /*
2900 * Read the IDT entry.
2901 */
2902 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2903 {
2904 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2905 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2906 }
2907 X86DESC Idte;
2908 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2909 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2910 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2911 {
2912 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2913 return rcStrict;
2914 }
2915 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2916 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2917 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2918
2919 /*
2920 * Check the descriptor type, DPL and such.
2921 * ASSUMES this is done in the same order as described for call-gate calls.
2922 */
2923 if (Idte.Gate.u1DescType)
2924 {
2925 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2926 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2927 }
2928 bool fTaskGate = false;
2929 uint8_t f32BitGate = true;
2930 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2931 switch (Idte.Gate.u4Type)
2932 {
2933 case X86_SEL_TYPE_SYS_UNDEFINED:
2934 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2935 case X86_SEL_TYPE_SYS_LDT:
2936 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2937 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2938 case X86_SEL_TYPE_SYS_UNDEFINED2:
2939 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2940 case X86_SEL_TYPE_SYS_UNDEFINED3:
2941 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2942 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2943 case X86_SEL_TYPE_SYS_UNDEFINED4:
2944 {
2945 /** @todo check what actually happens when the type is wrong...
2946 * esp. call gates. */
2947 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2948 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2949 }
2950
2951 case X86_SEL_TYPE_SYS_286_INT_GATE:
2952 f32BitGate = false;
2953 RT_FALL_THRU();
2954 case X86_SEL_TYPE_SYS_386_INT_GATE:
2955 fEflToClear |= X86_EFL_IF;
2956 break;
2957
2958 case X86_SEL_TYPE_SYS_TASK_GATE:
2959 fTaskGate = true;
2960#ifndef IEM_IMPLEMENTS_TASKSWITCH
2961 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2962#endif
2963 break;
2964
2965 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2966 f32BitGate = false;
RT_FALL_THRU();
2967 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2968 break;
2969
2970 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2971 }
2972
2973 /* Check DPL against CPL if applicable. */
2974 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2975 {
2976 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2977 {
2978 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2979 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2980 }
2981 }
2982
2983 /* Is it there? */
2984 if (!Idte.Gate.u1Present)
2985 {
2986 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2987 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2988 }
2989
2990 /* Is it a task-gate? */
2991 if (fTaskGate)
2992 {
2993 /*
2994 * Construct the error code masks based on what caused this task switch.
2995 * See Intel Instruction reference for INT.
2996 */
2997 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2998 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
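/* uExt becomes the EXT bit (bit 0) of the error codes constructed below: zero for software INT n, one for external events, exceptions and ICEBP. */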
2999 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3000 RTSEL SelTSS = Idte.Gate.u16Sel;
3001
3002 /*
3003 * Fetch the TSS descriptor in the GDT.
3004 */
3005 IEMSELDESC DescTSS;
3006 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3007 if (rcStrict != VINF_SUCCESS)
3008 {
3009 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3010 VBOXSTRICTRC_VAL(rcStrict)));
3011 return rcStrict;
3012 }
3013
3014 /* The TSS descriptor must be a system segment and be available (not busy). */
3015 if ( DescTSS.Legacy.Gen.u1DescType
3016 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3017 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3018 {
3019 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3020 u8Vector, SelTSS, DescTSS.Legacy.au64));
3021 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3022 }
3023
3024 /* The TSS must be present. */
3025 if (!DescTSS.Legacy.Gen.u1Present)
3026 {
3027 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3028 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3029 }
3030
3031 /* Do the actual task switch. */
3032 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3033 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3034 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3035 }
3036
3037 /* A null CS is bad. */
3038 RTSEL NewCS = Idte.Gate.u16Sel;
3039 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3040 {
3041 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3042 return iemRaiseGeneralProtectionFault0(pVCpu);
3043 }
3044
3045 /* Fetch the descriptor for the new CS. */
3046 IEMSELDESC DescCS;
3047 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3048 if (rcStrict != VINF_SUCCESS)
3049 {
3050 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3051 return rcStrict;
3052 }
3053
3054 /* Must be a code segment. */
3055 if (!DescCS.Legacy.Gen.u1DescType)
3056 {
3057 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3058 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3059 }
3060 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3061 {
3062 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3063 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3064 }
3065
3066 /* Don't allow lowering the privilege level. */
3067 /** @todo Does the lowering of privileges apply to software interrupts
3068 * only? This has a bearing on the more-privileged or
3069 * same-privilege stack behavior further down. A testcase would
3070 * be nice. */
3071 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3072 {
3073 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3074 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3075 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3076 }
3077
3078 /* Make sure the selector is present. */
3079 if (!DescCS.Legacy.Gen.u1Present)
3080 {
3081 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3082 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3083 }
3084
3085 /* Check the new EIP against the new CS limit. */
3086 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3087 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3088 ? Idte.Gate.u16OffsetLow
3089 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3090 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3091 if (uNewEip > cbLimitCS)
3092 {
3093 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3094 u8Vector, uNewEip, cbLimitCS, NewCS));
3095 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3096 }
3097 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3098
3099 /* Calc the flag image to push. */
3100 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3101 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3102 fEfl &= ~X86_EFL_RF;
3103 else
3104 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3105
3106 /* From V8086 mode only go to CPL 0. */
3107 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3108 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
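/* (For a conforming code segment the CPL stays unchanged; otherwise the handler runs at CS.DPL.) */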
3109 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3110 {
3111 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3112 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3113 }
3114
3115 /*
3116 * If the privilege level changes, we need to get a new stack from the TSS.
3117 * This in turn means validating the new SS and ESP...
3118 */
3119 if (uNewCpl != pVCpu->iem.s.uCpl)
3120 {
3121 RTSEL NewSS;
3122 uint32_t uNewEsp;
3123 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3124 if (rcStrict != VINF_SUCCESS)
3125 return rcStrict;
3126
3127 IEMSELDESC DescSS;
3128 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3129 if (rcStrict != VINF_SUCCESS)
3130 return rcStrict;
3131 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3132 if (!DescSS.Legacy.Gen.u1DefBig)
3133 {
3134 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3135 uNewEsp = (uint16_t)uNewEsp;
3136 }
3137
3138 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3139
3140 /* Check that there is sufficient space for the stack frame. */
3141 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3142 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3143 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3144 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
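/* E.g. a 32-bit gate with an error code needs 12 << 1 = 24 bytes (uErr, EIP, CS, EFLAGS, ESP, SS), or 20 << 1 = 40 bytes when the V8086 ES/DS/FS/GS are pushed as well; 16-bit gates use half of that. */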
3145
3146 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3147 {
3148 if ( uNewEsp - 1 > cbLimitSS
3149 || uNewEsp < cbStackFrame)
3150 {
3151 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3152 u8Vector, NewSS, uNewEsp, cbStackFrame));
3153 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3154 }
3155 }
3156 else
3157 {
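/* Expand-down stack segment: valid offsets lie above the limit, up to the 16-/32-bit upper bound, so the whole frame must fit inside (limit, bound]. */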
3158 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3159 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3160 {
3161 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3162 u8Vector, NewSS, uNewEsp, cbStackFrame));
3163 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3164 }
3165 }
3166
3167 /*
3168 * Start making changes.
3169 */
3170
3171 /* Set the new CPL so that stack accesses use it. */
3172 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3173 pVCpu->iem.s.uCpl = uNewCpl;
3174
3175 /* Create the stack frame. */
3176 RTPTRUNION uStackFrame;
3177 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3178 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3179 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3180 if (rcStrict != VINF_SUCCESS)
3181 return rcStrict;
3182 void * const pvStackFrame = uStackFrame.pv;
3183 if (f32BitGate)
3184 {
3185 if (fFlags & IEM_XCPT_FLAGS_ERR)
3186 *uStackFrame.pu32++ = uErr;
3187 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3188 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3189 uStackFrame.pu32[2] = fEfl;
3190 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3191 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3192 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3193 if (fEfl & X86_EFL_VM)
3194 {
3195 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3196 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3197 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3198 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3199 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3200 }
3201 }
3202 else
3203 {
3204 if (fFlags & IEM_XCPT_FLAGS_ERR)
3205 *uStackFrame.pu16++ = uErr;
3206 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3207 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3208 uStackFrame.pu16[2] = fEfl;
3209 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3210 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3211 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3212 if (fEfl & X86_EFL_VM)
3213 {
3214 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3215 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3216 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3217 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3218 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3219 }
3220 }
3221 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3222 if (rcStrict != VINF_SUCCESS)
3223 return rcStrict;
3224
3225 /* Mark the selectors 'accessed' (hope this is the correct time). */
3226 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3227 * after pushing the stack frame? (Write protect the gdt + stack to
3228 * find out.) */
3229 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3230 {
3231 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3232 if (rcStrict != VINF_SUCCESS)
3233 return rcStrict;
3234 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3235 }
3236
3237 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3238 {
3239 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3240 if (rcStrict != VINF_SUCCESS)
3241 return rcStrict;
3242 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3243 }
3244
3245 /*
3246 * Start committing the register changes (joins with the DPL=CPL branch).
3247 */
3248 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3249 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3250 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3251 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3252 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3253 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3254 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3255 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3256 * SP is loaded).
3257 * Need to check the other combinations too:
3258 * - 16-bit TSS, 32-bit handler
3259 * - 32-bit TSS, 16-bit handler */
3260 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3261 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3262 else
3263 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3264
3265 if (fEfl & X86_EFL_VM)
3266 {
3267 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3268 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3269 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3270 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3271 }
3272 }
3273 /*
3274 * Same privilege, no stack change and smaller stack frame.
3275 */
3276 else
3277 {
3278 uint64_t uNewRsp;
3279 RTPTRUNION uStackFrame;
3280 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
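/* Same-CPL frame: EIP/IP, CS and EFLAGS plus an optional error code, i.e. 6 or 8 bytes for a 16-bit gate and 12 or 16 bytes for a 32-bit one. */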
3281 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3282 if (rcStrict != VINF_SUCCESS)
3283 return rcStrict;
3284 void * const pvStackFrame = uStackFrame.pv;
3285
3286 if (f32BitGate)
3287 {
3288 if (fFlags & IEM_XCPT_FLAGS_ERR)
3289 *uStackFrame.pu32++ = uErr;
3290 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3291 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3292 uStackFrame.pu32[2] = fEfl;
3293 }
3294 else
3295 {
3296 if (fFlags & IEM_XCPT_FLAGS_ERR)
3297 *uStackFrame.pu16++ = uErr;
3298 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3299 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3300 uStackFrame.pu16[2] = fEfl;
3301 }
3302 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3303 if (rcStrict != VINF_SUCCESS)
3304 return rcStrict;
3305
3306 /* Mark the CS selector as 'accessed'. */
3307 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3308 {
3309 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3310 if (rcStrict != VINF_SUCCESS)
3311 return rcStrict;
3312 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3313 }
3314
3315 /*
3316 * Start committing the register changes (joins with the other branch).
3317 */
3318 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3319 }
3320
3321 /* ... register committing continues. */
3322 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3323 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3324 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3325 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3326 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3327 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3328
3329 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3330 fEfl &= ~fEflToClear;
3331 IEMMISC_SET_EFL(pVCpu, fEfl);
3332
3333 if (fFlags & IEM_XCPT_FLAGS_CR2)
3334 pVCpu->cpum.GstCtx.cr2 = uCr2;
3335
3336 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3337 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3338
3339 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3340}
3341
3342
3343/**
3344 * Implements exceptions and interrupts for long mode.
3345 *
3346 * @returns VBox strict status code.
3347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3348 * @param cbInstr The number of bytes to offset rIP by in the return
3349 * address.
3350 * @param u8Vector The interrupt / exception vector number.
3351 * @param fFlags The flags.
3352 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3353 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3354 */
3355static VBOXSTRICTRC
3356iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3357 uint8_t cbInstr,
3358 uint8_t u8Vector,
3359 uint32_t fFlags,
3360 uint16_t uErr,
3361 uint64_t uCr2) RT_NOEXCEPT
3362{
3363 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3364
3365 /*
3366 * Read the IDT entry.
3367 */
3368 uint16_t offIdt = (uint16_t)u8Vector << 4;
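/* Long mode IDT entries are 16 bytes each, hence the shift by 4. */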
3369 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3370 {
3371 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3372 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3373 }
3374 X86DESC64 Idte;
3375#ifdef _MSC_VER /* Shut up silly compiler warning. */
3376 Idte.au64[0] = 0;
3377 Idte.au64[1] = 0;
3378#endif
3379 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3380 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3381 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3382 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3383 {
3384 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3385 return rcStrict;
3386 }
3387 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3388 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3389 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3390
3391 /*
3392 * Check the descriptor type, DPL and such.
3393 * ASSUMES this is done in the same order as described for call-gate calls.
3394 */
3395 if (Idte.Gate.u1DescType)
3396 {
3397 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3398 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3399 }
3400 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3401 switch (Idte.Gate.u4Type)
3402 {
3403 case AMD64_SEL_TYPE_SYS_INT_GATE:
3404 fEflToClear |= X86_EFL_IF;
3405 break;
3406 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3407 break;
3408
3409 default:
3410 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3411 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3412 }
3413
3414 /* Check DPL against CPL if applicable. */
3415 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3416 {
3417 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3418 {
3419 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3420 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3421 }
3422 }
3423
3424 /* Is it there? */
3425 if (!Idte.Gate.u1Present)
3426 {
3427 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3428 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3429 }
3430
3431 /* A null CS is bad. */
3432 RTSEL NewCS = Idte.Gate.u16Sel;
3433 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3434 {
3435 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3436 return iemRaiseGeneralProtectionFault0(pVCpu);
3437 }
3438
3439 /* Fetch the descriptor for the new CS. */
3440 IEMSELDESC DescCS;
3441 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3442 if (rcStrict != VINF_SUCCESS)
3443 {
3444 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3445 return rcStrict;
3446 }
3447
3448 /* Must be a 64-bit code segment. */
3449 if (!DescCS.Long.Gen.u1DescType)
3450 {
3451 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3452 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3453 }
3454 if ( !DescCS.Long.Gen.u1Long
3455 || DescCS.Long.Gen.u1DefBig
3456 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3457 {
3458 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3459 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3460 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3461 }
3462
3463 /* Don't allow lowering the privilege level. For non-conforming CS
3464 selectors, the CS.DPL sets the privilege level the trap/interrupt
3465 handler runs at. For conforming CS selectors, the CPL remains
3466 unchanged, but the CS.DPL must be <= CPL. */
3467 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3468 * when CPU in Ring-0. Result \#GP? */
3469 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3470 {
3471 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3472 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3473 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3474 }
3475
3476
3477 /* Make sure the selector is present. */
3478 if (!DescCS.Legacy.Gen.u1Present)
3479 {
3480 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3481 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3482 }
3483
3484 /* Check that the new RIP is canonical. */
3485 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3486 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3487 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3488 if (!IEM_IS_CANONICAL(uNewRip))
3489 {
3490 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3491 return iemRaiseGeneralProtectionFault0(pVCpu);
3492 }
3493
3494 /*
3495 * If the privilege level changes or if the IST isn't zero, we need to get
3496 * a new stack from the TSS.
3497 */
3498 uint64_t uNewRsp;
3499 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3500 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3501 if ( uNewCpl != pVCpu->iem.s.uCpl
3502 || Idte.Gate.u3IST != 0)
3503 {
3504 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3505 if (rcStrict != VINF_SUCCESS)
3506 return rcStrict;
3507 }
3508 else
3509 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3510 uNewRsp &= ~(uint64_t)0xf;
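/* The CPU aligns RSP down to a 16-byte boundary before pushing the 64-bit frame. */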
3511
3512 /*
3513 * Calc the flag image to push.
3514 */
3515 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3516 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3517 fEfl &= ~X86_EFL_RF;
3518 else
3519 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3520
3521 /*
3522 * Start making changes.
3523 */
3524 /* Set the new CPL so that stack accesses use it. */
3525 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3526 pVCpu->iem.s.uCpl = uNewCpl;
3527
3528 /* Create the stack frame. */
3529 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
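/* The 64-bit frame always contains SS, RSP, RFLAGS, CS and RIP (5 qwords), plus an optional error code. */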
3530 RTPTRUNION uStackFrame;
3531 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3532 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3533 if (rcStrict != VINF_SUCCESS)
3534 return rcStrict;
3535 void * const pvStackFrame = uStackFrame.pv;
3536
3537 if (fFlags & IEM_XCPT_FLAGS_ERR)
3538 *uStackFrame.pu64++ = uErr;
3539 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3540 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3541 uStackFrame.pu64[2] = fEfl;
3542 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3543 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3544 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3545 if (rcStrict != VINF_SUCCESS)
3546 return rcStrict;
3547
3548 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3549 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3550 * after pushing the stack frame? (Write protect the gdt + stack to
3551 * find out.) */
3552 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3553 {
3554 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3555 if (rcStrict != VINF_SUCCESS)
3556 return rcStrict;
3557 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3558 }
3559
3560 /*
3561 * Start committing the register changes.
3562 */
3563 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3564 * hidden registers when interrupting 32-bit or 16-bit code! */
3565 if (uNewCpl != uOldCpl)
3566 {
3567 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3568 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3569 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3570 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3571 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3572 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3573 }
3574 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3575 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3576 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3577 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3578 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3579 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3580 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3581 pVCpu->cpum.GstCtx.rip = uNewRip;
3582
3583 fEfl &= ~fEflToClear;
3584 IEMMISC_SET_EFL(pVCpu, fEfl);
3585
3586 if (fFlags & IEM_XCPT_FLAGS_CR2)
3587 pVCpu->cpum.GstCtx.cr2 = uCr2;
3588
3589 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3590 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3591
3592 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3593}
3594
3595
3596/**
3597 * Implements exceptions and interrupts.
3598 *
3599 * All exceptions and interrupts go through this function!
3600 *
3601 * @returns VBox strict status code.
3602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3603 * @param cbInstr The number of bytes to offset rIP by in the return
3604 * address.
3605 * @param u8Vector The interrupt / exception vector number.
3606 * @param fFlags The flags.
3607 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3608 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3609 */
3610VBOXSTRICTRC
3611iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3612 uint8_t cbInstr,
3613 uint8_t u8Vector,
3614 uint32_t fFlags,
3615 uint16_t uErr,
3616 uint64_t uCr2) RT_NOEXCEPT
3617{
3618 /*
3619 * Get all the state that we might need here.
3620 */
3621 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3622 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3623
3624#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3625 /*
3626 * Flush prefetch buffer
3627 */
3628 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3629#endif
3630
3631 /*
3632 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3633 */
3634 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3635 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3636 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3637 | IEM_XCPT_FLAGS_BP_INSTR
3638 | IEM_XCPT_FLAGS_ICEBP_INSTR
3639 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3640 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3641 {
3642 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3643 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3644 u8Vector = X86_XCPT_GP;
3645 uErr = 0;
3646 }
3647#ifdef DBGFTRACE_ENABLED
3648 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3649 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3650 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3651#endif
3652
3653 /*
3654 * Evaluate whether NMI blocking should be in effect.
3655 * Normally, NMI blocking is in effect whenever we inject an NMI.
3656 */
3657 bool fBlockNmi;
3658 if ( u8Vector == X86_XCPT_NMI
3659 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3660 fBlockNmi = true;
3661 else
3662 fBlockNmi = false;
3663
3664#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3665 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3666 {
3667 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3668 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3669 return rcStrict0;
3670
3671 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3672 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3673 {
3674 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3675 fBlockNmi = false;
3676 }
3677 }
3678#endif
3679
3680#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3681 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3682 {
3683 /*
3684 * If the event is being injected as part of VMRUN, it isn't subject to event
3685 * intercepts in the nested-guest. However, secondary exceptions that occur
3686 * during injection of any event -are- subject to exception intercepts.
3687 *
3688 * See AMD spec. 15.20 "Event Injection".
3689 */
3690 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3691 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3692 else
3693 {
3694 /*
3695 * Check and handle if the event being raised is intercepted.
3696 */
3697 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3698 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3699 return rcStrict0;
3700 }
3701 }
3702#endif
3703
3704 /*
3705 * Set NMI blocking if necessary.
3706 */
3707 if ( fBlockNmi
3708 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3709 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3710
3711 /*
3712 * Do recursion accounting.
3713 */
3714 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3715 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3716 if (pVCpu->iem.s.cXcptRecursions == 0)
3717 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3718 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3719 else
3720 {
3721 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3722 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3723 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3724
3725 if (pVCpu->iem.s.cXcptRecursions >= 4)
3726 {
3727#ifdef DEBUG_bird
3728 AssertFailed();
3729#endif
3730 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3731 }
3732
3733 /*
3734 * Evaluate the sequence of recurring events.
3735 */
3736 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3737 NULL /* pXcptRaiseInfo */);
3738 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3739 { /* likely */ }
3740 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3741 {
3742 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3743 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3744 u8Vector = X86_XCPT_DF;
3745 uErr = 0;
3746#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3747 /* VMX nested-guest #DF intercept needs to be checked here. */
3748 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3749 {
3750 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3751 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3752 return rcStrict0;
3753 }
3754#endif
3755 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3756 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3757 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3758 }
3759 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3760 {
3761 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3762 return iemInitiateCpuShutdown(pVCpu);
3763 }
3764 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3765 {
3766 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3767 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3768 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3769 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3770 return VERR_EM_GUEST_CPU_HANG;
3771 }
3772 else
3773 {
3774 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3775 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3776 return VERR_IEM_IPE_9;
3777 }
3778
3779 /*
3780 * The 'EXT' bit is set when an exception occurs during delivery of an external
3781 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3782 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3783 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3784 *
3785 * [1] - Intel spec. 6.13 "Error Code"
3786 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3787 * [3] - Intel Instruction reference for INT n.
3788 */
3789 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3790 && (fFlags & IEM_XCPT_FLAGS_ERR)
3791 && u8Vector != X86_XCPT_PF
3792 && u8Vector != X86_XCPT_DF)
3793 {
3794 uErr |= X86_TRAP_ERR_EXTERNAL;
3795 }
3796 }
3797
3798 pVCpu->iem.s.cXcptRecursions++;
3799 pVCpu->iem.s.uCurXcpt = u8Vector;
3800 pVCpu->iem.s.fCurXcpt = fFlags;
3801 pVCpu->iem.s.uCurXcptErr = uErr;
3802 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3803
3804 /*
3805 * Extensive logging.
3806 */
3807#if defined(LOG_ENABLED) && defined(IN_RING3)
3808 if (LogIs3Enabled())
3809 {
3810 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3811 PVM pVM = pVCpu->CTX_SUFF(pVM);
3812 char szRegs[4096];
3813 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3814 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3815 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3816 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3817 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3818 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3819 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3820 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3821 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3822 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3823 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3824 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3825 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3826 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3827 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3828 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3829 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3830 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3831 " efer=%016VR{efer}\n"
3832 " pat=%016VR{pat}\n"
3833 " sf_mask=%016VR{sf_mask}\n"
3834 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3835 " lstar=%016VR{lstar}\n"
3836 " star=%016VR{star} cstar=%016VR{cstar}\n"
3837 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3838 );
3839
3840 char szInstr[256];
3841 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3842 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3843 szInstr, sizeof(szInstr), NULL);
3844 Log3(("%s%s\n", szRegs, szInstr));
3845 }
3846#endif /* LOG_ENABLED */
3847
3848 /*
3849 * Stats.
3850 */
3851 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3852 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
3853 else if (u8Vector <= X86_XCPT_LAST)
3854 {
3855 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
3856 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
3857 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
3858 }
3859
3860 /*
3861 * A #PF implies an INVLPG of the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
3862 * to ensure that a stale TLB or paging cache entry will only cause one
3863 * spurious #PF.
3864 */
3865 if ( u8Vector == X86_XCPT_PF
3866 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
3867 IEMTlbInvalidatePage(pVCpu, uCr2);
3868
3869 /*
3870 * Call the mode specific worker function.
3871 */
3872 VBOXSTRICTRC rcStrict;
3873 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3874 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3875 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3876 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3877 else
3878 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3879
3880 /* Flush the prefetch buffer. */
3881#ifdef IEM_WITH_CODE_TLB
3882 pVCpu->iem.s.pbInstrBuf = NULL;
3883#else
3884 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3885#endif
3886
3887 /*
3888 * Unwind.
3889 */
3890 pVCpu->iem.s.cXcptRecursions--;
3891 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3892 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3893 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3894 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3895 pVCpu->iem.s.cXcptRecursions + 1));
3896 return rcStrict;
3897}
3898
3899#ifdef IEM_WITH_SETJMP
3900/**
3901 * See iemRaiseXcptOrInt. Will not return.
3902 */
3903DECL_NO_RETURN(void)
3904iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3905 uint8_t cbInstr,
3906 uint8_t u8Vector,
3907 uint32_t fFlags,
3908 uint16_t uErr,
3909 uint64_t uCr2) RT_NOEXCEPT
3910{
3911 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3912 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3913}
3914#endif
3915
3916
3917/** \#DE - 00. */
3918VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3919{
3920 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3921}
3922
3923
3924/** \#DB - 01.
3925 * @note This automatically clears DR7.GD. */
3926VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3927{
3928 /** @todo set/clear RF. */
3929 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3930 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3931}
3932
3933
3934/** \#BR - 05. */
3935VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3936{
3937 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3938}
3939
3940
3941/** \#UD - 06. */
3942VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3943{
3944 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3945}
3946
3947
3948/** \#NM - 07. */
3949VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3950{
3951 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3952}
3953
3954
3955/** \#TS(err) - 0a. */
3956VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3957{
3958 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3959}
3960
3961
3962/** \#TS(tr) - 0a. */
3963VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3964{
3965 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3966 pVCpu->cpum.GstCtx.tr.Sel, 0);
3967}
3968
3969
3970/** \#TS(0) - 0a. */
3971VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3972{
3973 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3974 0, 0);
3975}
3976
3977
3978/** \#TS(err) - 0a. */
3979VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3980{
3981 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3982 uSel & X86_SEL_MASK_OFF_RPL, 0);
3983}
3984
3985
3986/** \#NP(err) - 0b. */
3987VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3988{
3989 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3990}
3991
3992
3993/** \#NP(sel) - 0b. */
3994VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3995{
3996 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3997 uSel & ~X86_SEL_RPL, 0);
3998}
3999
4000
4001/** \#SS(seg) - 0c. */
4002VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4003{
4004 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4005 uSel & ~X86_SEL_RPL, 0);
4006}
4007
4008
4009/** \#SS(err) - 0c. */
4010VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4011{
4012 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4013}
4014
4015
4016/** \#GP(n) - 0d. */
4017VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4018{
4019 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4020}
4021
4022
4023/** \#GP(0) - 0d. */
4024VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4025{
4026 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4027}
4028
4029#ifdef IEM_WITH_SETJMP
4030/** \#GP(0) - 0d. */
4031DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4032{
4033 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4034}
4035#endif
4036
4037
4038/** \#GP(sel) - 0d. */
4039VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4040{
4041 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4042 Sel & ~X86_SEL_RPL, 0);
4043}
4044
4045
4046/** \#GP(0) - 0d. */
4047VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4048{
4049 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4050}
4051
4052
4053/** \#GP(sel) - 0d. */
4054VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4055{
4056 NOREF(iSegReg); NOREF(fAccess);
4057 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4058 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4059}
4060
4061#ifdef IEM_WITH_SETJMP
4062/** \#GP(sel) - 0d, longjmp. */
4063DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4064{
4065 NOREF(iSegReg); NOREF(fAccess);
4066 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4067 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4068}
4069#endif
4070
4071/** \#GP(sel) - 0d. */
4072VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4073{
4074 NOREF(Sel);
4075 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4076}
4077
4078#ifdef IEM_WITH_SETJMP
4079/** \#GP(sel) - 0d, longjmp. */
4080DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4081{
4082 NOREF(Sel);
4083 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4084}
4085#endif
4086
4087
4088/** \#GP(sel) - 0d. */
4089VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4090{
4091 NOREF(iSegReg); NOREF(fAccess);
4092 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4093}
4094
4095#ifdef IEM_WITH_SETJMP
4096/** \#GP(sel) - 0d, longjmp. */
4097DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4098{
4099 NOREF(iSegReg); NOREF(fAccess);
4100 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4101}
4102#endif
4103
4104
4105/** \#PF(n) - 0e. */
4106VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4107{
4108 uint16_t uErr;
4109 switch (rc)
4110 {
4111 case VERR_PAGE_NOT_PRESENT:
4112 case VERR_PAGE_TABLE_NOT_PRESENT:
4113 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4114 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4115 uErr = 0;
4116 break;
4117
4118 default:
4119 AssertMsgFailed(("%Rrc\n", rc));
4120 RT_FALL_THRU();
4121 case VERR_ACCESS_DENIED:
4122 uErr = X86_TRAP_PF_P;
4123 break;
4124
4125 /** @todo reserved */
4126 }
4127
4128 if (pVCpu->iem.s.uCpl == 3)
4129 uErr |= X86_TRAP_PF_US;
4130
4131 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4132 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4133 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4134 uErr |= X86_TRAP_PF_ID;
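/* The I/D bit is only reported for instruction fetches when NX paging is possible (PAE + EFER.NXE), which is what the checks above verify. */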
4135
4136#if 0 /* This is so much non-sense, really. Why was it done like that? */
4137 /* Note! RW access callers reporting a WRITE protection fault, will clear
4138 the READ flag before calling. So, read-modify-write accesses (RW)
4139 can safely be reported as READ faults. */
4140 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4141 uErr |= X86_TRAP_PF_RW;
4142#else
4143 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4144 {
4145 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4146 /// (regardless of outcome of the comparison in the latter case).
4147 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4148 uErr |= X86_TRAP_PF_RW;
4149 }
4150#endif
4151
4152 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4153 uErr, GCPtrWhere);
4154}
4155
4156#ifdef IEM_WITH_SETJMP
4157/** \#PF(n) - 0e, longjmp. */
4158DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4159{
4160 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4161}
4162#endif
4163
4164
4165/** \#MF(0) - 10. */
4166VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu)
4167{
4168 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4169}
4170
4171
4172/** \#AC(0) - 11. */
4173VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
4174{
4175 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4176}
4177
4178#ifdef IEM_WITH_SETJMP
4179/** \#AC(0) - 11, longjmp. */
4180DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4181{
4182 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4183}
4184#endif
4185
4186
4187/** \#XF(0)/\#XM(0) - 19. */
4188VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4189{
4190 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4191}
4192
4193
4194/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4195IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4196{
4197 NOREF(cbInstr);
4198 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4199}
4200
4201
4202/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4203IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4204{
4205 NOREF(cbInstr);
4206 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4207}
4208
4209
4210/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4211IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4212{
4213 NOREF(cbInstr);
4214 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4215}
4216
4217
4218/** @} */
4219
4220/** @name Common opcode decoders.
4221 * @{
4222 */
4223//#include <iprt/mem.h>
4224
4225/**
4226 * Used to add extra details about a stub case.
4227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4228 */
4229void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4230{
4231#if defined(LOG_ENABLED) && defined(IN_RING3)
4232 PVM pVM = pVCpu->CTX_SUFF(pVM);
4233 char szRegs[4096];
4234 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4235 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4236 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4237 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4238 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4239 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4240 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4241 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4242 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4243 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4244 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4245 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4246 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4247 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4248 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4249 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4250 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4251 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4252 " efer=%016VR{efer}\n"
4253 " pat=%016VR{pat}\n"
4254 " sf_mask=%016VR{sf_mask}\n"
4255 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4256 " lstar=%016VR{lstar}\n"
4257 " star=%016VR{star} cstar=%016VR{cstar}\n"
4258 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4259 );
4260
4261 char szInstr[256];
4262 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4263 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4264 szInstr, sizeof(szInstr), NULL);
4265
4266 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4267#else
4268 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4269#endif
4270}
4271
4272/** @} */
4273
4274
4275
4276/** @name Register Access.
4277 * @{
4278 */
4279
4280/**
4281 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4282 *
4283 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4284 * segment limit.
4285 *
4286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4287 * @param offNextInstr The offset of the next instruction.
4288 */
4289VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4290{
4291 switch (pVCpu->iem.s.enmEffOpSize)
4292 {
4293 case IEMMODE_16BIT:
4294 {
4295 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4296 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4297 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4298 return iemRaiseGeneralProtectionFault0(pVCpu);
4299 pVCpu->cpum.GstCtx.rip = uNewIp;
4300 break;
4301 }
4302
4303 case IEMMODE_32BIT:
4304 {
4305 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4306 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4307
4308 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4309 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4310 return iemRaiseGeneralProtectionFault0(pVCpu);
4311 pVCpu->cpum.GstCtx.rip = uNewEip;
4312 break;
4313 }
4314
4315 case IEMMODE_64BIT:
4316 {
4317 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4318
4319 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4320 if (!IEM_IS_CANONICAL(uNewRip))
4321 return iemRaiseGeneralProtectionFault0(pVCpu);
4322 pVCpu->cpum.GstCtx.rip = uNewRip;
4323 break;
4324 }
4325
4326 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4327 }
4328
4329 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4330
4331#ifndef IEM_WITH_CODE_TLB
4332 /* Flush the prefetch buffer. */
4333 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4334#endif
4335
4336 return VINF_SUCCESS;
4337}
4338
4339
4340/**
4341 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4342 *
4343 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4344 * segment limit.
4345 *
4346 * @returns Strict VBox status code.
4347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4348 * @param offNextInstr The offset of the next instruction.
4349 */
4350VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4351{
4352 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4353
4354 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4355 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4356 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4357 return iemRaiseGeneralProtectionFault0(pVCpu);
4358 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4359 pVCpu->cpum.GstCtx.rip = uNewIp;
4360 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4361
4362#ifndef IEM_WITH_CODE_TLB
4363 /* Flush the prefetch buffer. */
4364 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4365#endif
4366
4367 return VINF_SUCCESS;
4368}
4369
4370
4371/**
4372 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4373 *
4374 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4375 * segment limit.
4376 *
4377 * @returns Strict VBox status code.
4378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4379 * @param offNextInstr The offset of the next instruction.
4380 */
4381VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4382{
4383 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4384
4385 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4386 {
4387 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4388
4389 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4390 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4391 return iemRaiseGeneralProtectionFault0(pVCpu);
4392 pVCpu->cpum.GstCtx.rip = uNewEip;
4393 }
4394 else
4395 {
4396 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4397
4398 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4399 if (!IEM_IS_CANONICAL(uNewRip))
4400 return iemRaiseGeneralProtectionFault0(pVCpu);
4401 pVCpu->cpum.GstCtx.rip = uNewRip;
4402 }
4403 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4404
4405#ifndef IEM_WITH_CODE_TLB
4406 /* Flush the prefetch buffer. */
4407 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4408#endif
4409
4410 return VINF_SUCCESS;
4411}
4412
4413
4414/**
4415 * Performs a near jump to the specified address.
4416 *
4417 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4418 * segment limit.
4419 *
4420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4421 * @param uNewRip The new RIP value.
4422 */
4423VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4424{
4425 switch (pVCpu->iem.s.enmEffOpSize)
4426 {
4427 case IEMMODE_16BIT:
4428 {
4429 Assert(uNewRip <= UINT16_MAX);
4430 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4431 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4432 return iemRaiseGeneralProtectionFault0(pVCpu);
4433 /** @todo Test 16-bit jump in 64-bit mode. */
4434 pVCpu->cpum.GstCtx.rip = uNewRip;
4435 break;
4436 }
4437
4438 case IEMMODE_32BIT:
4439 {
4440 Assert(uNewRip <= UINT32_MAX);
4441 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4442 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4443
4444 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4445 return iemRaiseGeneralProtectionFault0(pVCpu);
4446 pVCpu->cpum.GstCtx.rip = uNewRip;
4447 break;
4448 }
4449
4450 case IEMMODE_64BIT:
4451 {
4452 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4453
4454 if (!IEM_IS_CANONICAL(uNewRip))
4455 return iemRaiseGeneralProtectionFault0(pVCpu);
4456 pVCpu->cpum.GstCtx.rip = uNewRip;
4457 break;
4458 }
4459
4460 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4461 }
4462
4463 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4464
4465#ifndef IEM_WITH_CODE_TLB
4466 /* Flush the prefetch buffer. */
4467 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4468#endif
4469
4470 return VINF_SUCCESS;
4471}
4472
4473/** @} */
4474
4475
4476/** @name FPU access and helpers.
4477 *
4478 * @{
4479 */
4480
4481/**
4482 * Updates the x87.DS and FPUDP registers.
4483 *
4484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4485 * @param pFpuCtx The FPU context.
4486 * @param iEffSeg The effective segment register.
4487 * @param GCPtrEff The effective address relative to @a iEffSeg.
4488 */
4489DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4490{
4491 RTSEL sel;
4492 switch (iEffSeg)
4493 {
4494 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4495 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4496 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4497 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4498 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4499 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4500 default:
4501 AssertMsgFailed(("%d\n", iEffSeg));
4502 sel = pVCpu->cpum.GstCtx.ds.Sel;
4503 }
4504 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4505 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4506 {
4507 pFpuCtx->DS = 0;
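        /* In real and V86 mode FPUDP holds the operand's linear address, i.e. (selector << 4) + offset. */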
4508 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4509 }
4510 else if (!IEM_IS_LONG_MODE(pVCpu))
4511 {
4512 pFpuCtx->DS = sel;
4513 pFpuCtx->FPUDP = GCPtrEff;
4514 }
4515 else
4516 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4517}
4518
4519
4520/**
4521 * Rotates the stack registers in the push direction.
4522 *
4523 * @param pFpuCtx The FPU context.
4524 * @remarks This is a complete waste of time, but fxsave stores the registers in
4525 * stack order.
4526 */
4527DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4528{
4529 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4530 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4531 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4532 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4533 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4534 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4535 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4536 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4537 pFpuCtx->aRegs[0].r80 = r80Tmp;
4538}
4539
4540
4541/**
4542 * Rotates the stack registers in the pop direction.
4543 *
4544 * @param pFpuCtx The FPU context.
4545 * @remarks This is a complete waste of time, but fxsave stores the registers in
4546 * stack order.
4547 */
4548DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4549{
4550 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4551 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4552 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4553 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4554 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4555 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4556 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4557 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4558 pFpuCtx->aRegs[7].r80 = r80Tmp;
4559}
4560
4561
4562/**
4563 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4564 * exception prevents it.
4565 *
4566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4567 * @param pResult The FPU operation result to push.
4568 * @param pFpuCtx The FPU context.
4569 */
4570static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4571{
4572 /* Update FSW and bail if there are pending exceptions afterwards. */
4573 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4574 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4575 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4576 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4577 {
4578 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4579 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4580 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4581 pFpuCtx->FSW = fFsw;
4582 return;
4583 }
4584
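    /* A push decrements TOP; adding 7 and masking is TOP - 1 modulo 8. */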
4585 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4586 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4587 {
4588 /* All is fine, push the actual value. */
4589 pFpuCtx->FTW |= RT_BIT(iNewTop);
4590 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4591 }
4592 else if (pFpuCtx->FCW & X86_FCW_IM)
4593 {
4594 /* Masked stack overflow, push QNaN. */
4595 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4596 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4597 }
4598 else
4599 {
4600 /* Raise stack overflow, don't push anything. */
4601 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4602 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4603 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4604 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4605 return;
4606 }
4607
4608 fFsw &= ~X86_FSW_TOP_MASK;
4609 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4610 pFpuCtx->FSW = fFsw;
4611
4612 iemFpuRotateStackPush(pFpuCtx);
4613 RT_NOREF(pVCpu);
4614}
4615
4616
4617/**
4618 * Stores a result in a FPU register and updates the FSW and FTW.
4619 *
4620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4621 * @param pFpuCtx The FPU context.
4622 * @param pResult The result to store.
4623 * @param iStReg Which FPU register to store it in.
4624 */
4625static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4626{
4627 Assert(iStReg < 8);
4628 uint16_t fNewFsw = pFpuCtx->FSW;
4629 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4630 fNewFsw &= ~X86_FSW_C_MASK;
4631 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4632 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4633 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4634 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4635 pFpuCtx->FSW = fNewFsw;
4636 pFpuCtx->FTW |= RT_BIT(iReg);
4637 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4638 RT_NOREF(pVCpu);
4639}
4640
4641
4642/**
4643 * Only updates the FPU status word (FSW) with the result of the current
4644 * instruction.
4645 *
4646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4647 * @param pFpuCtx The FPU context.
4648 * @param u16FSW The FSW output of the current instruction.
4649 */
4650static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4651{
4652 uint16_t fNewFsw = pFpuCtx->FSW;
4653 fNewFsw &= ~X86_FSW_C_MASK;
4654 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4655 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4656 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4657 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4658 pFpuCtx->FSW = fNewFsw;
4659 RT_NOREF(pVCpu);
4660}
4661
4662
4663/**
4664 * Pops one item off the FPU stack if no pending exception prevents it.
4665 *
4666 * @param pFpuCtx The FPU context.
4667 */
4668static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4669{
4670 /* Check pending exceptions. */
4671 uint16_t uFSW = pFpuCtx->FSW;
4672 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4673 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4674 return;
4675
4676 /* TOP++ (adding 9 and masking the 3-bit TOP field equals adding 1 modulo 8, i.e. a pop). */
4677 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4678 uFSW &= ~X86_FSW_TOP_MASK;
4679 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4680 pFpuCtx->FSW = uFSW;
4681
4682 /* Mark the previous ST0 as empty. */
4683 iOldTop >>= X86_FSW_TOP_SHIFT;
4684 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4685
4686 /* Rotate the registers. */
4687 iemFpuRotateStackPop(pFpuCtx);
4688}
4689
4690
4691/**
4692 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4693 *
4694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4695 * @param pResult The FPU operation result to push.
4696 */
4697void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4698{
4699 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4700 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4701 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4702}
4703
4704
4705/**
4706 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4707 * and sets FPUDP and FPUDS.
4708 *
4709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4710 * @param pResult The FPU operation result to push.
4711 * @param iEffSeg The effective segment register.
4712 * @param GCPtrEff The effective address relative to @a iEffSeg.
4713 */
4714void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4715{
4716 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4717 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4718 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4719 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4720}
4721
4722
4723/**
4724 * Replace ST0 with the first value and push the second onto the FPU stack,
4725 * unless a pending exception prevents it.
4726 *
4727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4728 * @param pResult The FPU operation result to store and push.
4729 */
4730void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4731{
4732 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4733 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4734
4735 /* Update FSW and bail if there are pending exceptions afterwards. */
4736 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4737 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4738 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4739 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4740 {
4741 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4742 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4743 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4744 pFpuCtx->FSW = fFsw;
4745 return;
4746 }
4747
4748 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4749 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4750 {
4751 /* All is fine, push the actual value. */
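            /* aRegs[] is kept in TOP-relative (stack) order: r80Result1 replaces the current
               ST0 (aRegs[0]) and r80Result2 goes into aRegs[7], which the rotate below turns
               into the new ST0 while the old ST0 becomes ST1. */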
4752 pFpuCtx->FTW |= RT_BIT(iNewTop);
4753 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4754 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4755 }
4756 else if (pFpuCtx->FCW & X86_FCW_IM)
4757 {
4758 /* Masked stack overflow, push QNaN. */
4759 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4760 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4761 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4762 }
4763 else
4764 {
4765 /* Raise stack overflow, don't push anything. */
4766 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4767 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4768 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4769 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4770 return;
4771 }
4772
4773 fFsw &= ~X86_FSW_TOP_MASK;
4774 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4775 pFpuCtx->FSW = fFsw;
4776
4777 iemFpuRotateStackPush(pFpuCtx);
4778}
4779
4780
4781/**
4782 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4783 * FOP.
4784 *
4785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4786 * @param pResult The result to store.
4787 * @param iStReg Which FPU register to store it in.
4788 */
4789void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4790{
4791 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4792 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4793 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4794}
4795
4796
4797/**
4798 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4799 * FOP, and then pops the stack.
4800 *
4801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4802 * @param pResult The result to store.
4803 * @param iStReg Which FPU register to store it in.
4804 */
4805void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4806{
4807 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4808 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4809 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4810 iemFpuMaybePopOne(pFpuCtx);
4811}
4812
4813
4814/**
4815 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4816 * FPUDP, and FPUDS.
4817 *
4818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4819 * @param pResult The result to store.
4820 * @param iStReg Which FPU register to store it in.
4821 * @param iEffSeg The effective memory operand selector register.
4822 * @param GCPtrEff The effective memory operand offset.
4823 */
4824void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4825 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4826{
4827 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4828 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4829 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4830 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4831}
4832
4833
4834/**
4835 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4836 * FPUDP, and FPUDS, and then pops the stack.
4837 *
4838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4839 * @param pResult The result to store.
4840 * @param iStReg Which FPU register to store it in.
4841 * @param iEffSeg The effective memory operand selector register.
4842 * @param GCPtrEff The effective memory operand offset.
4843 */
4844void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4845 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4846{
4847 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4848 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4849 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4850 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4851 iemFpuMaybePopOne(pFpuCtx);
4852}
4853
4854
4855/**
4856 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4857 *
4858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4859 */
4860void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4861{
4862 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4863 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4864}
4865
4866
4867/**
4868 * Updates the FSW, FOP, FPUIP, and FPUCS.
4869 *
4870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4871 * @param u16FSW The FSW from the current instruction.
4872 */
4873void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4874{
4875 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4876 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4877 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4878}
4879
4880
4881/**
4882 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4883 *
4884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4885 * @param u16FSW The FSW from the current instruction.
4886 */
4887void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4888{
4889 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4890 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4891 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4892 iemFpuMaybePopOne(pFpuCtx);
4893}
4894
4895
4896/**
4897 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4898 *
4899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4900 * @param u16FSW The FSW from the current instruction.
4901 * @param iEffSeg The effective memory operand selector register.
4902 * @param GCPtrEff The effective memory operand offset.
4903 */
4904void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4905{
4906 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4907 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4908 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4909 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4910}
4911
4912
4913/**
4914 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4915 *
4916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4917 * @param u16FSW The FSW from the current instruction.
4918 */
4919void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4920{
4921 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4922 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4923 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4924 iemFpuMaybePopOne(pFpuCtx);
4925 iemFpuMaybePopOne(pFpuCtx);
4926}
4927
4928
4929/**
4930 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4931 *
4932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4933 * @param u16FSW The FSW from the current instruction.
4934 * @param iEffSeg The effective memory operand selector register.
4935 * @param GCPtrEff The effective memory operand offset.
4936 */
4937void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4938{
4939 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4940 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4941 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4942 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4943 iemFpuMaybePopOne(pFpuCtx);
4944}
4945
4946
4947/**
4948 * Worker routine for raising an FPU stack underflow exception.
4949 *
4950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4951 * @param pFpuCtx The FPU context.
4952 * @param iStReg The stack register being accessed.
4953 */
4954static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
4955{
4956 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4957 if (pFpuCtx->FCW & X86_FCW_IM)
4958 {
4959 /* Masked underflow. */
4960 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4961 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4962 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4963 if (iStReg != UINT8_MAX)
4964 {
4965 pFpuCtx->FTW |= RT_BIT(iReg);
4966 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4967 }
4968 }
4969 else
4970 {
4971 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4972 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4973 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
4974 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4975 }
4976 RT_NOREF(pVCpu);
4977}
4978
4979
4980/**
4981 * Raises a FPU stack underflow exception.
4982 *
4983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4984 * @param iStReg The destination register that should be loaded
4985 * with QNaN if \#IS is not masked. Specify
4986 * UINT8_MAX if none (like for fcom).
4987 */
4988void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4989{
4990 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4991 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4992 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
4993}
4994
4995
4996void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4997{
4998 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4999 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5000 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5001 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5002}
5003
5004
5005void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5006{
5007 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5008 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5009 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5010 iemFpuMaybePopOne(pFpuCtx);
5011}
5012
5013
5014void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5015{
5016 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5017 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5018 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5019 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5020 iemFpuMaybePopOne(pFpuCtx);
5021}
5022
5023
5024void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5025{
5026 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5027 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5028 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5029 iemFpuMaybePopOne(pFpuCtx);
5030 iemFpuMaybePopOne(pFpuCtx);
5031}
5032
5033
5034void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5035{
5036 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5037 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5038
5039 if (pFpuCtx->FCW & X86_FCW_IM)
5040 {
5041 /* Masked underflow - Push QNaN. */
5042 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5043 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5044 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5045 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5046 pFpuCtx->FTW |= RT_BIT(iNewTop);
5047 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5048 iemFpuRotateStackPush(pFpuCtx);
5049 }
5050 else
5051 {
5052 /* Exception pending - don't change TOP or the register stack. */
5053 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5054 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5055 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5056 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5057 }
5058}
5059
5060
5061void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5062{
5063 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5064 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5065
5066 if (pFpuCtx->FCW & X86_FCW_IM)
5067 {
5068 /* Masked underflow - Push QNaN. */
5069 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5070 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5071 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5072 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5073 pFpuCtx->FTW |= RT_BIT(iNewTop);
5074 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5075 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5076 iemFpuRotateStackPush(pFpuCtx);
5077 }
5078 else
5079 {
5080 /* Exception pending - don't change TOP or the register stack. */
5081 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5082 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5083 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5084 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5085 }
5086}
5087
5088
5089/**
5090 * Worker routine for raising an FPU stack overflow exception on a push.
5091 *
5092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5093 * @param pFpuCtx The FPU context.
5094 */
5095static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5096{
5097 if (pFpuCtx->FCW & X86_FCW_IM)
5098 {
5099 /* Masked overflow. */
5100 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5101 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5102 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5103 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5104 pFpuCtx->FTW |= RT_BIT(iNewTop);
5105 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5106 iemFpuRotateStackPush(pFpuCtx);
5107 }
5108 else
5109 {
5110 /* Exception pending - don't change TOP or the register stack. */
5111 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5112 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5113 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5114 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5115 }
5116 RT_NOREF(pVCpu);
5117}
5118
5119
5120/**
5121 * Raises a FPU stack overflow exception on a push.
5122 *
5123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5124 */
5125void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5126{
5127 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5128 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5129 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5130}
5131
5132
5133/**
5134 * Raises a FPU stack overflow exception on a push with a memory operand.
5135 *
5136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5137 * @param iEffSeg The effective memory operand selector register.
5138 * @param GCPtrEff The effective memory operand offset.
5139 */
5140void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5141{
5142 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5143 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5144 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5145 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5146}
5147
5148/** @} */
5149
5150
5151/** @name SSE+AVX SIMD access and helpers.
5152 *
5153 * @{
5154 */
5155/**
5156 * Stores a result in a SIMD XMM register, updates the MXCSR.
5157 *
5158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5159 * @param pResult The result to store.
5160 * @param iXmmReg Which SIMD XMM register to store the result in.
5161 */
5162void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5163{
5164 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5165 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5166
5167 /* The result is only updated if there is no unmasked exception pending. */
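    /* The MXCSR exception mask bits sit X86_MXCSR_XCPT_MASK_SHIFT bits above the
       corresponding exception flags, so shifting the (inverted) masks down lines
       them up with the flags for the test below. */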
5168 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5169 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5170 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5171}
5172
5173
5174/**
5175 * Updates the MXCSR.
5176 *
5177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5178 * @param fMxcsr The new MXCSR value.
5179 */
5180void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5181{
5182 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5183 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5184}
5185/** @} */
5186
5187
5188/** @name Memory access.
5189 *
5190 * @{
5191 */
5192
5193
5194/**
5195 * Updates the IEMCPU::cbWritten counter if applicable.
5196 *
5197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5198 * @param fAccess The access being accounted for.
5199 * @param cbMem The access size.
5200 */
5201DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5202{
5203 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5204 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5205 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5206}
5207
5208
5209/**
5210 * Applies the segment limit, base and attributes.
5211 *
5212 * This may raise a \#GP or \#SS.
5213 *
5214 * @returns VBox strict status code.
5215 *
5216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5217 * @param fAccess The kind of access which is being performed.
5218 * @param iSegReg The index of the segment register to apply.
5219 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5220 * TSS, ++).
5221 * @param cbMem The access size.
5222 * @param pGCPtrMem Pointer to the guest memory address to apply
5223 * segmentation to. Input and output parameter.
5224 */
5225VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5226{
5227 if (iSegReg == UINT8_MAX)
5228 return VINF_SUCCESS;
5229
5230 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5231 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5232 switch (pVCpu->iem.s.enmCpuMode)
5233 {
5234 case IEMMODE_16BIT:
5235 case IEMMODE_32BIT:
5236 {
5237 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5238 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5239
5240 if ( pSel->Attr.n.u1Present
5241 && !pSel->Attr.n.u1Unusable)
5242 {
5243 Assert(pSel->Attr.n.u1DescType);
5244 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5245 {
5246 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5247 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5248 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5249
5250 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5251 {
5252 /** @todo CPL check. */
5253 }
5254
5255 /*
5256 * There are two kinds of data selectors, normal and expand down.
5257 */
5258 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5259 {
5260 if ( GCPtrFirst32 > pSel->u32Limit
5261 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5262 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5263 }
5264 else
5265 {
5266 /*
5267 * The upper boundary is defined by the B bit, not the G bit!
5268 */
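                        /* The valid range for an expand-down segment is (limit, top], where top is
                           0xffffffff when B=1 and 0xffff when B=0; anything at or below the limit faults. */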
5269 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5270 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5271 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5272 }
5273 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5274 }
5275 else
5276 {
5277 /*
5278 * Code selectors can usually be used to read through; writing is
5279 * only permitted in real and V8086 mode.
5280 */
5281 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5282 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5283 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5284 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5285 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5286
5287 if ( GCPtrFirst32 > pSel->u32Limit
5288 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5289 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5290
5291 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5292 {
5293 /** @todo CPL check. */
5294 }
5295
5296 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5297 }
5298 }
5299 else
5300 return iemRaiseGeneralProtectionFault0(pVCpu);
5301 return VINF_SUCCESS;
5302 }
5303
5304 case IEMMODE_64BIT:
5305 {
5306 RTGCPTR GCPtrMem = *pGCPtrMem;
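            /* In 64-bit mode only the FS and GS segment bases apply; the other segment
               bases are treated as zero and only canonicality is checked below. */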
5307 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5308 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5309
5310 Assert(cbMem >= 1);
5311 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5312 return VINF_SUCCESS;
5313 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5314 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5315 return iemRaiseGeneralProtectionFault0(pVCpu);
5316 }
5317
5318 default:
5319 AssertFailedReturn(VERR_IEM_IPE_7);
5320 }
5321}
5322
5323
5324/**
5325 * Translates a virtual address to a physical address and checks if we
5326 * can access the page as specified.
5327 *
5328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5329 * @param GCPtrMem The virtual address.
5330 * @param fAccess The intended access.
5331 * @param pGCPhysMem Where to return the physical address.
5332 */
5333VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5334{
5335 /** @todo Need a different PGM interface here. We're currently using
5336 * generic / REM interfaces. This won't cut it for R0. */
5337 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5338 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5339 * here. */
5340 PGMPTWALK Walk;
5341 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5342 if (RT_FAILURE(rc))
5343 {
5344 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5345 /** @todo Check unassigned memory in unpaged mode. */
5346 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5347#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5348 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5349 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5350#endif
5351 *pGCPhysMem = NIL_RTGCPHYS;
5352 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5353 }
5354
5355 /* If the page is writable and does not have the no-exec bit set, all
5356 access is allowed. Otherwise we'll have to check more carefully... */
5357 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5358 {
5359 /* Write to read only memory? */
5360 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5361 && !(Walk.fEffective & X86_PTE_RW)
5362 && ( ( pVCpu->iem.s.uCpl == 3
5363 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5364 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5365 {
5366 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5367 *pGCPhysMem = NIL_RTGCPHYS;
5368#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5369 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5370 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5371#endif
5372 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5373 }
5374
5375 /* Kernel memory accessed by userland? */
5376 if ( !(Walk.fEffective & X86_PTE_US)
5377 && pVCpu->iem.s.uCpl == 3
5378 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5379 {
5380 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5381 *pGCPhysMem = NIL_RTGCPHYS;
5382#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5383 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5384 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5385#endif
5386 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5387 }
5388
5389 /* Executing non-executable memory? */
5390 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5391 && (Walk.fEffective & X86_PTE_PAE_NX)
5392 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5393 {
5394 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5395 *pGCPhysMem = NIL_RTGCPHYS;
5396#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5397 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5398 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5399#endif
5400 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5401 VERR_ACCESS_DENIED);
5402 }
5403 }
5404
5405 /*
5406 * Set the dirty / access flags.
5407 * ASSUMES this is set when the address is translated rather than on commit...
5408 */
5409 /** @todo testcase: check when A and D bits are actually set by the CPU. */
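    /* A write needs both the accessed and dirty bits set; reads and execution only need the accessed bit. */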
5410 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5411 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5412 {
5413 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5414 AssertRC(rc2);
5415 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5416 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5417 }
5418
5419 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5420 *pGCPhysMem = GCPhys;
5421 return VINF_SUCCESS;
5422}
5423
5424
5425/**
5426 * Looks up a memory mapping entry.
5427 *
5428 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5430 * @param pvMem The memory address.
5431 * @param fAccess The access flags (type and origin) to match.
5432 */
5433DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5434{
5435 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5436 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5437 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5438 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5439 return 0;
5440 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5441 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5442 return 1;
5443 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5444 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5445 return 2;
5446 return VERR_NOT_FOUND;
5447}
5448
5449
5450/**
5451 * Finds a free memmap entry when using iNextMapping doesn't work.
5452 *
5453 * @returns Memory mapping index, 1024 on failure.
5454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5455 */
5456static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5457{
5458 /*
5459 * The easy case.
5460 */
5461 if (pVCpu->iem.s.cActiveMappings == 0)
5462 {
5463 pVCpu->iem.s.iNextMapping = 1;
5464 return 0;
5465 }
5466
5467 /* There should be enough mappings for all instructions. */
5468 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5469
5470 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5471 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5472 return i;
5473
5474 AssertFailedReturn(1024);
5475}
5476
5477
5478/**
5479 * Commits a bounce buffer that needs writing back and unmaps it.
5480 *
5481 * @returns Strict VBox status code.
5482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5483 * @param iMemMap The index of the buffer to commit.
5484 * @param fPostponeFail Whether we can postpone write failures to ring-3.
5485 * Always false in ring-3, obviously.
5486 */
5487static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5488{
5489 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5490 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5491#ifdef IN_RING3
5492 Assert(!fPostponeFail);
5493 RT_NOREF_PV(fPostponeFail);
5494#endif
5495
5496 /*
5497 * Do the writing.
5498 */
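    /* The bounce buffer may describe one or two physical ranges; cbSecond is zero
       unless the original guest access crossed a page boundary. */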
5499 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5500 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5501 {
5502 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5503 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5504 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5505 if (!pVCpu->iem.s.fBypassHandlers)
5506 {
5507 /*
5508 * Carefully and efficiently dealing with access handler return
5509 * codes make this a little bloated.
5510 */
5511 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5512 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5513 pbBuf,
5514 cbFirst,
5515 PGMACCESSORIGIN_IEM);
5516 if (rcStrict == VINF_SUCCESS)
5517 {
5518 if (cbSecond)
5519 {
5520 rcStrict = PGMPhysWrite(pVM,
5521 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5522 pbBuf + cbFirst,
5523 cbSecond,
5524 PGMACCESSORIGIN_IEM);
5525 if (rcStrict == VINF_SUCCESS)
5526 { /* nothing */ }
5527 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5528 {
5529 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5530 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5531 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5532 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5533 }
5534#ifndef IN_RING3
5535 else if (fPostponeFail)
5536 {
5537 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5538 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5539 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5540 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5541 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5542 return iemSetPassUpStatus(pVCpu, rcStrict);
5543 }
5544#endif
5545 else
5546 {
5547 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5548 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5549 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5550 return rcStrict;
5551 }
5552 }
5553 }
5554 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5555 {
5556 if (!cbSecond)
5557 {
5558 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5559 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5560 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5561 }
5562 else
5563 {
5564 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5565 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5566 pbBuf + cbFirst,
5567 cbSecond,
5568 PGMACCESSORIGIN_IEM);
5569 if (rcStrict2 == VINF_SUCCESS)
5570 {
5571 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5572 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5573 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5574 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5575 }
5576 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5577 {
5578 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5579 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5580 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5581 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5582 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5583 }
5584#ifndef IN_RING3
5585 else if (fPostponeFail)
5586 {
5587 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5588 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5589 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5590 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5591 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5592 return iemSetPassUpStatus(pVCpu, rcStrict);
5593 }
5594#endif
5595 else
5596 {
5597 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5598 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5599 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5600 return rcStrict2;
5601 }
5602 }
5603 }
5604#ifndef IN_RING3
5605 else if (fPostponeFail)
5606 {
5607 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5608 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5609 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5610 if (!cbSecond)
5611 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5612 else
5613 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5614 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5615 return iemSetPassUpStatus(pVCpu, rcStrict);
5616 }
5617#endif
5618 else
5619 {
5620 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5621 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5622 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5623 return rcStrict;
5624 }
5625 }
5626 else
5627 {
5628 /*
5629 * No access handlers, much simpler.
5630 */
5631 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5632 if (RT_SUCCESS(rc))
5633 {
5634 if (cbSecond)
5635 {
5636 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5637 if (RT_SUCCESS(rc))
5638 { /* likely */ }
5639 else
5640 {
5641 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5642 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5643 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5644 return rc;
5645 }
5646 }
5647 }
5648 else
5649 {
5650 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5651 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5652 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5653 return rc;
5654 }
5655 }
5656 }
5657
5658#if defined(IEM_LOG_MEMORY_WRITES)
5659 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5660 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5661 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5662 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5663 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5664 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5665
5666 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5667 g_cbIemWrote = cbWrote;
5668 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5669#endif
5670
5671 /*
5672 * Free the mapping entry.
5673 */
5674 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5675 Assert(pVCpu->iem.s.cActiveMappings != 0);
5676 pVCpu->iem.s.cActiveMappings--;
5677 return VINF_SUCCESS;
5678}
5679
5680
5681/**
5682 * iemMemMap worker that deals with a request crossing pages.
5683 */
5684static VBOXSTRICTRC
5685iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5686{
5687 /*
5688 * Do the address translations.
5689 */
5690 RTGCPHYS GCPhysFirst;
5691 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5692 if (rcStrict != VINF_SUCCESS)
5693 return rcStrict;
5694
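    /* Translate the second page using the address of the last byte of the access and
       mask the result down to the start of that page. */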
5695 RTGCPHYS GCPhysSecond;
5696 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5697 fAccess, &GCPhysSecond);
5698 if (rcStrict != VINF_SUCCESS)
5699 return rcStrict;
5700 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5701
5702 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5703
5704 /*
5705 * Read in the current memory content if it's a read, execute or partial
5706 * write access.
5707 */
5708 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5709 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5710 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5711
5712 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5713 {
5714 if (!pVCpu->iem.s.fBypassHandlers)
5715 {
5716 /*
5717 * Must carefully deal with access handler status codes here,
5718 * makes the code a bit bloated.
5719 */
5720 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5721 if (rcStrict == VINF_SUCCESS)
5722 {
5723 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5724 if (rcStrict == VINF_SUCCESS)
5725 { /*likely */ }
5726 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5727 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5728 else
5729 {
5730 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5731 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5732 return rcStrict;
5733 }
5734 }
5735 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5736 {
5737 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5738 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5739 {
5740 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5741 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5742 }
5743 else
5744 {
5745 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5746 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5747 return rcStrict2;
5748 }
5749 }
5750 else
5751 {
5752 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5753 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5754 return rcStrict;
5755 }
5756 }
5757 else
5758 {
5759 /*
5760 * No informational status codes here, much more straightforward.
5761 */
5762 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5763 if (RT_SUCCESS(rc))
5764 {
5765 Assert(rc == VINF_SUCCESS);
5766 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5767 if (RT_SUCCESS(rc))
5768 Assert(rc == VINF_SUCCESS);
5769 else
5770 {
5771 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5772 return rc;
5773 }
5774 }
5775 else
5776 {
5777 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5778 return rc;
5779 }
5780 }
5781 }
5782#ifdef VBOX_STRICT
5783 else
5784 memset(pbBuf, 0xcc, cbMem);
5785 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5786 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5787#endif
5788 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
5789
5790 /*
5791 * Commit the bounce buffer entry.
5792 */
5793 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5794 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5795 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5796 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5797 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5798 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5799 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5800 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5801 pVCpu->iem.s.cActiveMappings++;
5802
5803 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5804 *ppvMem = pbBuf;
5805 return VINF_SUCCESS;
5806}
5807
5808
5809/**
5810 * iemMemMap worker that deals with iemMemPageMap failures.
5811 */
5812static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5813 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5814{
5815 /*
5816 * Filter out conditions we can handle and the ones which shouldn't happen.
5817 */
5818 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5819 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5820 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5821 {
5822 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5823 return rcMap;
5824 }
5825 pVCpu->iem.s.cPotentialExits++;
5826
5827 /*
5828 * Read in the current memory content if it's a read, execute or partial
5829 * write access.
5830 */
5831 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5832 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5833 {
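        /* Unassigned physical memory reads as all 0xff bytes. */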
5834 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5835 memset(pbBuf, 0xff, cbMem);
5836 else
5837 {
5838 int rc;
5839 if (!pVCpu->iem.s.fBypassHandlers)
5840 {
5841 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5842 if (rcStrict == VINF_SUCCESS)
5843 { /* nothing */ }
5844 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5845 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5846 else
5847 {
5848 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5849 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5850 return rcStrict;
5851 }
5852 }
5853 else
5854 {
5855 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5856 if (RT_SUCCESS(rc))
5857 { /* likely */ }
5858 else
5859 {
5860 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5861 GCPhysFirst, rc));
5862 return rc;
5863 }
5864 }
5865 }
5866 }
5867#ifdef VBOX_STRICT
5868 else
5869 memset(pbBuf, 0xcc, cbMem);
5870#endif
5871#ifdef VBOX_STRICT
5872 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5873 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5874#endif
5875
5876 /*
5877 * Commit the bounce buffer entry.
5878 */
5879 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5880 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5881 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5882 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5883 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5884 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5885 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5886 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5887 pVCpu->iem.s.cActiveMappings++;
5888
5889 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5890 *ppvMem = pbBuf;
5891 return VINF_SUCCESS;
5892}
5893
5894
5895
5896/**
5897 * Maps the specified guest memory for the given kind of access.
5898 *
5899 * This may be using bounce buffering of the memory if it's crossing a page
5900 * boundary or if there is an access handler installed for any of it. Because
5901 * of lock prefix guarantees, we're in for some extra clutter when this
5902 * happens.
5903 *
5904 * This may raise a \#GP, \#SS, \#PF or \#AC.
5905 *
5906 * @returns VBox strict status code.
5907 *
5908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5909 * @param ppvMem Where to return the pointer to the mapped memory.
5910 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
5911 * 8, 12, 16, 32 or 512. When used by string operations
5912 * it can be up to a page.
5913 * @param iSegReg The index of the segment register to use for this
5914 * access. The base and limits are checked. Use UINT8_MAX
5915 * to indicate that no segmentation is required (for IDT,
5916 * GDT and LDT accesses).
5917 * @param GCPtrMem The address of the guest memory.
5918 * @param fAccess How the memory is being accessed. The
5919 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
5920 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
5921 * when raising exceptions.
5922 * @param uAlignCtl Alignment control:
5923 * - Bits 15:0 is the alignment mask.
5924 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
5925 * IEM_MEMMAP_F_ALIGN_SSE, and
5926 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
5927 * Pass zero to skip alignment.
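 *
 *                      E.g., requiring natural 8-byte alignment that raises \#GP(0)
 *                      on violation would, given the bit layout above, presumably be
 *                      requested with an alignment mask of 7 plus IEM_MEMMAP_F_ALIGN_GP.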
5928 */
5929VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5930 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
5931{
5932 /*
5933 * Check the input and figure out which mapping entry to use.
5934 */
5935 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
5936 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
5937 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
5938 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5939 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5940
5941 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5942 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5943 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5944 {
5945 iMemMap = iemMemMapFindFree(pVCpu);
5946 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5947 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5948 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5949 pVCpu->iem.s.aMemMappings[2].fAccess),
5950 VERR_IEM_IPE_9);
5951 }
5952
5953 /*
5954 * Map the memory, checking that we can actually access it. If something
5955 * slightly complicated happens, fall back on bounce buffering.
5956 */
5957 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5958 if (rcStrict == VINF_SUCCESS)
5959 { /* likely */ }
5960 else
5961 return rcStrict;
5962
5963 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5964 { /* likely */ }
5965 else
5966 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5967
5968 /*
5969 * Alignment check.
5970 */
5971 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
5972 { /* likelyish */ }
5973 else
5974 {
5975 /* Misaligned access. */
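            /* Decide between #AC and #GP(0): accesses not flagged IEM_MEMMAP_F_ALIGN_GP
               (or SSE accesses with MXCSR.MM set) raise #AC when alignment checking is
               enabled and are let through otherwise, while IEM_MEMMAP_F_ALIGN_GP accesses
               raise #GP(0) unless IEM_MEMMAP_F_ALIGN_GP_OR_AC together with enabled
               alignment checking turns it into #AC. */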
5976 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
5977 {
5978 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
5979 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
5980 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
5981 {
5982 AssertCompile(X86_CR0_AM == X86_EFL_AC);
5983
5984 if (iemMemAreAlignmentChecksEnabled(pVCpu))
5985 return iemRaiseAlignmentCheckException(pVCpu);
5986 }
5987 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
5988 && iemMemAreAlignmentChecksEnabled(pVCpu)
5989/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
5990 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
5991 )
5992 return iemRaiseAlignmentCheckException(pVCpu);
5993 else
5994 return iemRaiseGeneralProtectionFault0(pVCpu);
5995 }
5996 }
5997
5998#ifdef IEM_WITH_DATA_TLB
5999 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6000
6001 /*
6002 * Get the TLB entry for this page.
6003 */
6004 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6005 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6006 if (pTlbe->uTag == uTag)
6007 {
6008# ifdef VBOX_WITH_STATISTICS
6009 pVCpu->iem.s.DataTlb.cTlbHits++;
6010# endif
6011 }
6012 else
6013 {
6014 pVCpu->iem.s.DataTlb.cTlbMisses++;
6015 PGMPTWALK Walk;
6016 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6017 if (RT_FAILURE(rc))
6018 {
6019 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6020# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6021 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6022 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6023# endif
6024 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6025 }
6026
6027 Assert(Walk.fSucceeded);
6028 pTlbe->uTag = uTag;
6029 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6030 pTlbe->GCPhys = Walk.GCPhys;
6031 pTlbe->pbMappingR3 = NULL;
6032 }
6033
6034 /*
6035 * Check TLB page table level access flags.
6036 */
6037 /* If the page is either supervisor only or non-writable, we need to do
6038 more careful access checks. */
6039 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6040 {
6041 /* Write to read only memory? */
6042 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6043 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6044 && ( ( pVCpu->iem.s.uCpl == 3
6045 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6046 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6047 {
6048 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6049# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6050 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6051 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6052# endif
6053 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6054 }
6055
6056 /* Kernel memory accessed by userland? */
6057 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6058 && pVCpu->iem.s.uCpl == 3
6059 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6060 {
6061 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6062# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6063 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6064 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6065# endif
6066 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6067 }
6068 }
6069
6070 /*
6071 * Set the dirty / access flags.
6072 * ASSUMES this is set when the address is translated rather than on commit...
6073 */
6074 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6075 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6076 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6077 {
6078 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6079 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6080 AssertRC(rc2);
6081 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6082 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6083 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6084 }
6085
6086 /*
6087 * Look up the physical page info if necessary.
6088 */
6089 uint8_t *pbMem = NULL;
6090 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6091# ifdef IN_RING3
6092 pbMem = pTlbe->pbMappingR3;
6093# else
6094 pbMem = NULL;
6095# endif
6096 else
6097 {
6098 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6099 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6100 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6101 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6102 if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6103 { /* likely */ }
6104 else
6105 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6106 pTlbe->pbMappingR3 = NULL;
6107 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6108 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6109 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6110 &pbMem, &pTlbe->fFlagsAndPhysRev);
6111 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6112# ifdef IN_RING3
6113 pTlbe->pbMappingR3 = pbMem;
6114# endif
6115 }
6116
6117 /*
6118 * Check the physical page level access and mapping.
6119 */
6120 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6121 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6122 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6123 { /* probably likely */ }
6124 else
6125 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6126 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6127 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6128 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6129 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6130 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6131
6132 if (pbMem)
6133 {
6134 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6135 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6136 fAccess |= IEM_ACCESS_NOT_LOCKED;
6137 }
6138 else
6139 {
6140 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6141 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6142 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6143 if (rcStrict != VINF_SUCCESS)
6144 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6145 }
6146
6147 void * const pvMem = pbMem;
6148
6149 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6150 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6151 if (fAccess & IEM_ACCESS_TYPE_READ)
6152 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6153
6154#else /* !IEM_WITH_DATA_TLB */
6155
6156 RTGCPHYS GCPhysFirst;
6157 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6158 if (rcStrict != VINF_SUCCESS)
6159 return rcStrict;
6160
6161 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6162 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6163 if (fAccess & IEM_ACCESS_TYPE_READ)
6164 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6165
6166 void *pvMem;
6167 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6168 if (rcStrict != VINF_SUCCESS)
6169 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6170
6171#endif /* !IEM_WITH_DATA_TLB */
6172
6173 /*
6174 * Fill in the mapping table entry.
6175 */
6176 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6177 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6178 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6179 pVCpu->iem.s.cActiveMappings += 1;
6180
6181 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6182 *ppvMem = pvMem;
6183
6184 return VINF_SUCCESS;
6185}
6186
6187
6188/**
6189 * Commits the guest memory if bounce buffered and unmaps it.
6190 *
6191 * @returns Strict VBox status code.
6192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6193 * @param pvMem The mapping.
6194 * @param fAccess The kind of access.
6195 */
6196VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6197{
6198 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6199 AssertReturn(iMemMap >= 0, iMemMap);
6200
6201 /* If it's bounce buffered, we may need to write back the buffer. */
6202 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6203 {
6204 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6205 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6206 }
6207 /* Otherwise unlock it. */
6208 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6209 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6210
6211 /* Free the entry. */
6212 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6213 Assert(pVCpu->iem.s.cActiveMappings != 0);
6214 pVCpu->iem.s.cActiveMappings--;
6215 return VINF_SUCCESS;
6216}
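
/*
 * Illustrative only: a minimal sketch of the typical iemMemMap +
 * iemMemCommitAndUnmap caller pattern, mirroring the data store helpers
 * further down in this file (pVCpu, iSegReg, GCPtrMem and u16Value stand in
 * for the caller's own arguments).  Note how the alignment control is
 * composed: bits 15:0 carry the mask (sizeof - 1 here), while flags such as
 * IEM_MEMMAP_F_ALIGN_GP and IEM_MEMMAP_F_ALIGN_SSE can be OR'ed into the
 * upper bits for the stricter SSE-style checks.
 *
 *      uint16_t    *pu16Dst;
 *      VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
 *                                  IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          *pu16Dst = u16Value;    // the store goes thru the mapping (or a bounce buffer)
 *          rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rc;
 */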
6217
6218#ifdef IEM_WITH_SETJMP
6219
6220/**
6221 * Maps the specified guest memory for the given kind of access, longjmp on
6222 * error.
6223 *
6224 * This may be using bounce buffering of the memory if it's crossing a page
6225 * boundary or if there is an access handler installed for any of it. Because
6226 * of lock prefix guarantees, we're in for some extra clutter when this
6227 * happens.
6228 *
6229 * This may raise a \#GP, \#SS, \#PF or \#AC.
6230 *
6231 * @returns Pointer to the mapped memory.
6232 *
6233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6234 * @param cbMem The number of bytes to map. This is usually 1,
6235 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6236 * string operations it can be up to a page.
6237 * @param iSegReg The index of the segment register to use for
6238 * this access. The base and limits are checked.
6239 * Use UINT8_MAX to indicate that no segmentation
6240 * is required (for IDT, GDT and LDT accesses).
6241 * @param GCPtrMem The address of the guest memory.
6242 * @param fAccess How the memory is being accessed. The
6243 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6244 * how to map the memory, while the
6245 * IEM_ACCESS_WHAT_XXX bit is used when raising
6246 * exceptions.
6247 * @param uAlignCtl Alignment control:
6248 * - Bits 15:0 is the alignment mask.
6249 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6250 * IEM_MEMMAP_F_ALIGN_SSE, and
6251 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6252 * Pass zero to skip alignment.
6253 */
6254void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6255 uint32_t uAlignCtl) RT_NOEXCEPT
6256{
6257 /*
6258 * Check the input, check segment access and adjust address
6259 * with segment base.
6260 */
6261 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6262 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6263 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6264
6265 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6266 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6267 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6268
6269 /*
6270 * Alignment check.
6271 */
6272 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6273 { /* likelyish */ }
6274 else
6275 {
6276 /* Misaligned access. */
6277 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6278 {
6279 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6280 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6281 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6282 {
6283 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6284
6285 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6286 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6287 }
6288 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6289 && iemMemAreAlignmentChecksEnabled(pVCpu)
6290/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6291 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6292 )
6293 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6294 else
6295 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6296 }
6297 }
6298
6299 /*
6300 * Figure out which mapping entry to use.
6301 */
6302 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6303 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6304 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6305 {
6306 iMemMap = iemMemMapFindFree(pVCpu);
6307 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6308 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6309 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6310 pVCpu->iem.s.aMemMappings[2].fAccess),
6311 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6312 }
6313
6314 /*
6315 * Crossing a page boundary?
6316 */
6317 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6318 { /* No (likely). */ }
6319 else
6320 {
6321 void *pvMem;
6322 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6323 if (rcStrict == VINF_SUCCESS)
6324 return pvMem;
6325 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6326 }
6327
6328#ifdef IEM_WITH_DATA_TLB
6329 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6330
6331 /*
6332 * Get the TLB entry for this page.
6333 */
6334 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6335 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6336 if (pTlbe->uTag == uTag)
6337 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6338 else
6339 {
6340 pVCpu->iem.s.DataTlb.cTlbMisses++;
6341 PGMPTWALK Walk;
6342 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6343 if (RT_FAILURE(rc))
6344 {
6345 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6346# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6347 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6348 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6349# endif
6350 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6351 }
6352
6353 Assert(Walk.fSucceeded);
6354 pTlbe->uTag = uTag;
6355 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6356 pTlbe->GCPhys = Walk.GCPhys;
6357 pTlbe->pbMappingR3 = NULL;
6358 }
6359
6360 /*
6361 * Check the flags and physical revision.
6362 */
6363 /** @todo make the caller pass these in with fAccess. */
6364 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6365 ? IEMTLBE_F_PT_NO_USER : 0;
6366 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6367 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6368 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6369 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6370 ? IEMTLBE_F_PT_NO_WRITE : 0)
6371 : 0;
6372 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6373 uint8_t *pbMem = NULL;
6374 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6375 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6376# ifdef IN_RING3
6377 pbMem = pTlbe->pbMappingR3;
6378# else
6379 pbMem = NULL;
6380# endif
6381 else
6382 {
6383 /*
6384 * Okay, something isn't quite right or needs refreshing.
6385 */
6386 /* Write to read only memory? */
6387 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6388 {
6389 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6390# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6391 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6392 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6393# endif
6394 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6395 }
6396
6397 /* Kernel memory accessed by userland? */
6398 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6399 {
6400 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6401# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6402 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6403 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6404# endif
6405 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6406 }
6407
6408 /* Set the dirty / access flags.
6409 ASSUMES this is set when the address is translated rather than on commit... */
6410 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6411 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6412 {
6413 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6414 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6415 AssertRC(rc2);
6416 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6417 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6418 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6419 }
6420
6421 /*
6422 * Check if the physical page info needs updating.
6423 */
6424 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6425# ifdef IN_RING3
6426 pbMem = pTlbe->pbMappingR3;
6427# else
6428 pbMem = NULL;
6429# endif
6430 else
6431 {
6432 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6433 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6434 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6435 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6436 pTlbe->pbMappingR3 = NULL;
6437 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6438 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6439 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6440 &pbMem, &pTlbe->fFlagsAndPhysRev);
6441 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6442# ifdef IN_RING3
6443 pTlbe->pbMappingR3 = pbMem;
6444# endif
6445 }
6446
6447 /*
6448 * Check the physical page level access and mapping.
6449 */
6450 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6451 { /* probably likely */ }
6452 else
6453 {
6454 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6455 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6456 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6457 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6458 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6459 if (rcStrict == VINF_SUCCESS)
6460 return pbMem;
6461 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6462 }
6463 }
6464 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6465
6466 if (pbMem)
6467 {
6468 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6469 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6470 fAccess |= IEM_ACCESS_NOT_LOCKED;
6471 }
6472 else
6473 {
6474 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6475 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6476 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6477 if (rcStrict == VINF_SUCCESS)
6478 return pbMem;
6479 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6480 }
6481
6482 void * const pvMem = pbMem;
6483
6484 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6485 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6486 if (fAccess & IEM_ACCESS_TYPE_READ)
6487 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6488
6489#else /* !IEM_WITH_DATA_TLB */
6490
6491
6492 RTGCPHYS GCPhysFirst;
6493 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6494 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6495 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6496
6497 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6498 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6499 if (fAccess & IEM_ACCESS_TYPE_READ)
6500 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6501
6502 void *pvMem;
6503 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6504 if (rcStrict == VINF_SUCCESS)
6505 { /* likely */ }
6506 else
6507 {
6508 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6509 if (rcStrict == VINF_SUCCESS)
6510 return pvMem;
6511 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6512 }
6513
6514#endif /* !IEM_WITH_DATA_TLB */
6515
6516 /*
6517 * Fill in the mapping table entry.
6518 */
6519 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6520 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6521 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6522 pVCpu->iem.s.cActiveMappings++;
6523
6524 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6525 return pvMem;
6526}
6527
6528
6529/**
6530 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6531 *
6532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6533 * @param pvMem The mapping.
6534 * @param fAccess The kind of access.
6535 */
6536void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6537{
6538 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6539 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6540
6541 /* If it's bounce buffered, we may need to write back the buffer. */
6542 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6543 {
6544 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6545 {
6546 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6547 if (rcStrict == VINF_SUCCESS)
6548 return;
6549 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6550 }
6551 }
6552 /* Otherwise unlock it. */
6553 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6554 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6555
6556 /* Free the entry. */
6557 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6558 Assert(pVCpu->iem.s.cActiveMappings != 0);
6559 pVCpu->iem.s.cActiveMappings--;
6560}
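
/*
 * Illustrative only: the longjmp flavoured counterpart of the pattern shown
 * after iemMemCommitAndUnmap above, as used by the *Jmp fetch/store helpers
 * below.  No status code plumbing is needed because iemMemMapJmp and
 * iemMemCommitAndUnmapJmp longjmp on any failure (iSegReg and GCPtrMem again
 * stand in for the caller's arguments).
 *
 *      uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem,
 *                                                               IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *      uint16_t const  u16Ret  = *pu16Src;
 *      iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 *      return u16Ret;
 */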
6561
6562#endif /* IEM_WITH_SETJMP */
6563
6564#ifndef IN_RING3
6565/**
6566 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6567 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
6568 *
6569 * Allows the instruction to be completed and retired, while the IEM user will
6570 * return to ring-3 immediately afterwards and do the postponed writes there.
6571 *
6572 * @returns VBox status code (no strict statuses). Caller must check
6573 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6575 * @param pvMem The mapping.
6576 * @param fAccess The kind of access.
6577 */
6578VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6579{
6580 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6581 AssertReturn(iMemMap >= 0, iMemMap);
6582
6583 /* If it's bounce buffered, we may need to write back the buffer. */
6584 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6585 {
6586 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6587 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6588 }
6589 /* Otherwise unlock it. */
6590 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6591 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6592
6593 /* Free the entry. */
6594 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6595 Assert(pVCpu->iem.s.cActiveMappings != 0);
6596 pVCpu->iem.s.cActiveMappings--;
6597 return VINF_SUCCESS;
6598}
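
/*
 * Illustrative only: per the documentation above, a caller using the
 * postponing variant is assumed to recheck the force-flag before repeating a
 * string instruction, along these lines.  This is a sketch, not code taken
 * from this file; VMCPU_FF_IS_SET is assumed to be the usual VMCPU
 * force-flag accessor.
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *      if (   rcStrict == VINF_SUCCESS
 *          && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *      {
 *          // safe to continue / repeat the string instruction
 *      }
 */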
6599#endif
6600
6601
6602/**
6603 * Rolls back mappings, releasing page locks and such.
6604 *
6605 * The caller shall only call this after checking cActiveMappings.
6606 *
6607 * @returns Strict VBox status code to pass up.
6608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6609 */
6610void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6611{
6612 Assert(pVCpu->iem.s.cActiveMappings > 0);
6613
6614 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6615 while (iMemMap-- > 0)
6616 {
6617 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6618 if (fAccess != IEM_ACCESS_INVALID)
6619 {
6620 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6621 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6622 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6623 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6624 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6625 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6626 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6627 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6628 pVCpu->iem.s.cActiveMappings--;
6629 }
6630 }
6631}
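
/*
 * Illustrative only: as noted above, callers are expected to check the
 * active mapping count before rolling back, roughly like this:
 *
 *      if (pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */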
6632
6633
6634/**
6635 * Fetches a data byte.
6636 *
6637 * @returns Strict VBox status code.
6638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6639 * @param pu8Dst Where to return the byte.
6640 * @param iSegReg The index of the segment register to use for
6641 * this access. The base and limits are checked.
6642 * @param GCPtrMem The address of the guest memory.
6643 */
6644VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6645{
6646 /* The lazy approach for now... */
6647 uint8_t const *pu8Src;
6648 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6649 if (rc == VINF_SUCCESS)
6650 {
6651 *pu8Dst = *pu8Src;
6652 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6653 }
6654 return rc;
6655}
6656
6657
6658#ifdef IEM_WITH_SETJMP
6659/**
6660 * Fetches a data byte, longjmp on error.
6661 *
6662 * @returns The byte.
6663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6664 * @param iSegReg The index of the segment register to use for
6665 * this access. The base and limits are checked.
6666 * @param GCPtrMem The address of the guest memory.
6667 */
6668uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6669{
6670 /* The lazy approach for now... */
6671 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6672 uint8_t const bRet = *pu8Src;
6673 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6674 return bRet;
6675}
6676#endif /* IEM_WITH_SETJMP */
6677
6678
6679/**
6680 * Fetches a data word.
6681 *
6682 * @returns Strict VBox status code.
6683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6684 * @param pu16Dst Where to return the word.
6685 * @param iSegReg The index of the segment register to use for
6686 * this access. The base and limits are checked.
6687 * @param GCPtrMem The address of the guest memory.
6688 */
6689VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6690{
6691 /* The lazy approach for now... */
6692 uint16_t const *pu16Src;
6693 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6694 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6695 if (rc == VINF_SUCCESS)
6696 {
6697 *pu16Dst = *pu16Src;
6698 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6699 }
6700 return rc;
6701}
6702
6703
6704#ifdef IEM_WITH_SETJMP
6705/**
6706 * Fetches a data word, longjmp on error.
6707 *
6708 * @returns The word.
6709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6710 * @param iSegReg The index of the segment register to use for
6711 * this access. The base and limits are checked.
6712 * @param GCPtrMem The address of the guest memory.
6713 */
6714uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6715{
6716 /* The lazy approach for now... */
6717 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6718 sizeof(*pu16Src) - 1);
6719 uint16_t const u16Ret = *pu16Src;
6720 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6721 return u16Ret;
6722}
6723#endif
6724
6725
6726/**
6727 * Fetches a data dword.
6728 *
6729 * @returns Strict VBox status code.
6730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6731 * @param pu32Dst Where to return the dword.
6732 * @param iSegReg The index of the segment register to use for
6733 * this access. The base and limits are checked.
6734 * @param GCPtrMem The address of the guest memory.
6735 */
6736VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6737{
6738 /* The lazy approach for now... */
6739 uint32_t const *pu32Src;
6740 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6741 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6742 if (rc == VINF_SUCCESS)
6743 {
6744 *pu32Dst = *pu32Src;
6745 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6746 }
6747 return rc;
6748}
6749
6750
6751/**
6752 * Fetches a data dword and zero extends it to a qword.
6753 *
6754 * @returns Strict VBox status code.
6755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6756 * @param pu64Dst Where to return the qword.
6757 * @param iSegReg The index of the segment register to use for
6758 * this access. The base and limits are checked.
6759 * @param GCPtrMem The address of the guest memory.
6760 */
6761VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6762{
6763 /* The lazy approach for now... */
6764 uint32_t const *pu32Src;
6765 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6766 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6767 if (rc == VINF_SUCCESS)
6768 {
6769 *pu64Dst = *pu32Src;
6770 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6771 }
6772 return rc;
6773}
6774
6775
6776#ifdef IEM_WITH_SETJMP
6777
6778/**
6779 * Fetches a data dword, longjmp on error, fallback/safe version.
6780 *
6781 * @returns The dword.
6782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6783 * @param iSegReg The index of the segment register to use for
6784 * this access. The base and limits are checked.
6785 * @param GCPtrMem The address of the guest memory.
6786 */
6787uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6788{
6789 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6790 sizeof(*pu32Src) - 1);
6791 uint32_t const u32Ret = *pu32Src;
6792 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6793 return u32Ret;
6794}
6795
6796
6797/**
6798 * Fetches a data dword, longjmp on error.
6799 *
6800 * @returns The dword.
6801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6802 * @param iSegReg The index of the segment register to use for
6803 * this access. The base and limits are checked.
6804 * @param GCPtrMem The address of the guest memory.
6805 */
6806uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6807{
6808# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6809 /*
6810 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6811 */
6812 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6813 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6814 {
6815 /*
6816 * TLB lookup.
6817 */
6818 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6819 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6820 if (pTlbe->uTag == uTag)
6821 {
6822 /*
6823 * Check TLB page table level access flags.
6824 */
6825 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6826 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6827 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6828 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6829 {
6830 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6831
6832 /*
6833 * Alignment check:
6834 */
6835 /** @todo check priority \#AC vs \#PF */
6836 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6837 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6838 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6839 || pVCpu->iem.s.uCpl != 3)
6840 {
6841 /*
6842 * Fetch and return the dword
6843 */
6844 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6845 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6846 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6847 }
6848 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6849 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6850 }
6851 }
6852 }
6853
6854 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
6855 outdated page pointer, or other troubles. */
6856 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6857 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6858
6859# else
6860 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
6861 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6862 uint32_t const u32Ret = *pu32Src;
6863 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6864 return u32Ret;
6865# endif
6866}
6867#endif
6868
6869
6870#ifdef SOME_UNUSED_FUNCTION
6871/**
6872 * Fetches a data dword and sign extends it to a qword.
6873 *
6874 * @returns Strict VBox status code.
6875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6876 * @param pu64Dst Where to return the sign extended value.
6877 * @param iSegReg The index of the segment register to use for
6878 * this access. The base and limits are checked.
6879 * @param GCPtrMem The address of the guest memory.
6880 */
6881VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6882{
6883 /* The lazy approach for now... */
6884 int32_t const *pi32Src;
6885 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
6886 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
6887 if (rc == VINF_SUCCESS)
6888 {
6889 *pu64Dst = *pi32Src;
6890 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6891 }
6892#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6893 else
6894 *pu64Dst = 0;
6895#endif
6896 return rc;
6897}
6898#endif
6899
6900
6901/**
6902 * Fetches a data qword.
6903 *
6904 * @returns Strict VBox status code.
6905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6906 * @param pu64Dst Where to return the qword.
6907 * @param iSegReg The index of the segment register to use for
6908 * this access. The base and limits are checked.
6909 * @param GCPtrMem The address of the guest memory.
6910 */
6911VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6912{
6913 /* The lazy approach for now... */
6914 uint64_t const *pu64Src;
6915 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6916 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6917 if (rc == VINF_SUCCESS)
6918 {
6919 *pu64Dst = *pu64Src;
6920 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6921 }
6922 return rc;
6923}
6924
6925
6926#ifdef IEM_WITH_SETJMP
6927/**
6928 * Fetches a data qword, longjmp on error.
6929 *
6930 * @returns The qword.
6931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6932 * @param iSegReg The index of the segment register to use for
6933 * this access. The base and limits are checked.
6934 * @param GCPtrMem The address of the guest memory.
6935 */
6936uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6937{
6938 /* The lazy approach for now... */
6939 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
6940 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6941 uint64_t const u64Ret = *pu64Src;
6942 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6943 return u64Ret;
6944}
6945#endif
6946
6947
6948/**
6949 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6950 *
6951 * @returns Strict VBox status code.
6952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6953 * @param pu64Dst Where to return the qword.
6954 * @param iSegReg The index of the segment register to use for
6955 * this access. The base and limits are checked.
6956 * @param GCPtrMem The address of the guest memory.
6957 */
6958VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6959{
6960 /* The lazy approach for now... */
6961 uint64_t const *pu64Src;
6962 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6963 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6964 if (rc == VINF_SUCCESS)
6965 {
6966 *pu64Dst = *pu64Src;
6967 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6968 }
6969 return rc;
6970}
6971
6972
6973#ifdef IEM_WITH_SETJMP
6974/**
6975 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6976 *
6977 * @returns The qword.
6978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6979 * @param iSegReg The index of the segment register to use for
6980 * this access. The base and limits are checked.
6981 * @param GCPtrMem The address of the guest memory.
6982 */
6983uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6984{
6985 /* The lazy approach for now... */
6986 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6987 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6988 uint64_t const u64Ret = *pu64Src;
6989 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6990 return u64Ret;
6991}
6992#endif
6993
6994
6995/**
6996 * Fetches a data tword.
6997 *
6998 * @returns Strict VBox status code.
6999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7000 * @param pr80Dst Where to return the tword.
7001 * @param iSegReg The index of the segment register to use for
7002 * this access. The base and limits are checked.
7003 * @param GCPtrMem The address of the guest memory.
7004 */
7005VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7006{
7007 /* The lazy approach for now... */
7008 PCRTFLOAT80U pr80Src;
7009 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7010 if (rc == VINF_SUCCESS)
7011 {
7012 *pr80Dst = *pr80Src;
7013 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7014 }
7015 return rc;
7016}
7017
7018
7019#ifdef IEM_WITH_SETJMP
7020/**
7021 * Fetches a data tword, longjmp on error.
7022 *
7023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7024 * @param pr80Dst Where to return the tword.
7025 * @param iSegReg The index of the segment register to use for
7026 * this access. The base and limits are checked.
7027 * @param GCPtrMem The address of the guest memory.
7028 */
7029void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7030{
7031 /* The lazy approach for now... */
7032 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7033 *pr80Dst = *pr80Src;
7034 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7035}
7036#endif
7037
7038
7039/**
7040 * Fetches a data decimal tword.
7041 *
7042 * @returns Strict VBox status code.
7043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7044 * @param pd80Dst Where to return the tword.
7045 * @param iSegReg The index of the segment register to use for
7046 * this access. The base and limits are checked.
7047 * @param GCPtrMem The address of the guest memory.
7048 */
7049VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7050{
7051 /* The lazy approach for now... */
7052 PCRTPBCD80U pd80Src;
7053 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7054 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7055 if (rc == VINF_SUCCESS)
7056 {
7057 *pd80Dst = *pd80Src;
7058 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7059 }
7060 return rc;
7061}
7062
7063
7064#ifdef IEM_WITH_SETJMP
7065/**
7066 * Fetches a data decimal tword, longjmp on error.
7067 *
7068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7069 * @param pd80Dst Where to return the tword.
7070 * @param iSegReg The index of the segment register to use for
7071 * this access. The base and limits are checked.
7072 * @param GCPtrMem The address of the guest memory.
7073 */
7074void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7075{
7076 /* The lazy approach for now... */
7077 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7078 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7079 *pd80Dst = *pd80Src;
7080 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7081}
7082#endif
7083
7084
7085/**
7086 * Fetches a data dqword (double qword), generally SSE related.
7087 *
7088 * @returns Strict VBox status code.
7089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7090 * @param pu128Dst Where to return the dqword.
7091 * @param iSegReg The index of the segment register to use for
7092 * this access. The base and limits are checked.
7093 * @param GCPtrMem The address of the guest memory.
7094 */
7095VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7096{
7097 /* The lazy approach for now... */
7098 PCRTUINT128U pu128Src;
7099 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7100 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7101 if (rc == VINF_SUCCESS)
7102 {
7103 pu128Dst->au64[0] = pu128Src->au64[0];
7104 pu128Dst->au64[1] = pu128Src->au64[1];
7105 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7106 }
7107 return rc;
7108}
7109
7110
7111#ifdef IEM_WITH_SETJMP
7112/**
7113 * Fetches a data dqword (double qword), generally SSE related.
7114 *
7115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7116 * @param pu128Dst Where to return the dqword.
7117 * @param iSegReg The index of the segment register to use for
7118 * this access. The base and limits are checked.
7119 * @param GCPtrMem The address of the guest memory.
7120 */
7121void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7122{
7123 /* The lazy approach for now... */
7124 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7125 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7126 pu128Dst->au64[0] = pu128Src->au64[0];
7127 pu128Dst->au64[1] = pu128Src->au64[1];
7128 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7129}
7130#endif
7131
7132
7133/**
7134 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7135 * related.
7136 *
7137 * Raises \#GP(0) if not aligned.
7138 *
7139 * @returns Strict VBox status code.
7140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7141 * @param pu128Dst Where to return the dqword.
7142 * @param iSegReg The index of the segment register to use for
7143 * this access. The base and limits are checked.
7144 * @param GCPtrMem The address of the guest memory.
7145 */
7146VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7147{
7148 /* The lazy approach for now... */
7149 PCRTUINT128U pu128Src;
7150 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7151 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7152 if (rc == VINF_SUCCESS)
7153 {
7154 pu128Dst->au64[0] = pu128Src->au64[0];
7155 pu128Dst->au64[1] = pu128Src->au64[1];
7156 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7157 }
7158 return rc;
7159}
7160
7161
7162#ifdef IEM_WITH_SETJMP
7163/**
7164 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7165 * related, longjmp on error.
7166 *
7167 * Raises \#GP(0) if not aligned.
7168 *
7169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7170 * @param pu128Dst Where to return the dqword.
7171 * @param iSegReg The index of the segment register to use for
7172 * this access. The base and limits are checked.
7173 * @param GCPtrMem The address of the guest memory.
7174 */
7175void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7176{
7177 /* The lazy approach for now... */
7178 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7179 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7180 pu128Dst->au64[0] = pu128Src->au64[0];
7181 pu128Dst->au64[1] = pu128Src->au64[1];
7182 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7183}
7184#endif
7185
7186
7187/**
7188 * Fetches a data oword (octo word), generally AVX related.
7189 *
7190 * @returns Strict VBox status code.
7191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7192 * @param pu256Dst Where to return the oword.
7193 * @param iSegReg The index of the segment register to use for
7194 * this access. The base and limits are checked.
7195 * @param GCPtrMem The address of the guest memory.
7196 */
7197VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7198{
7199 /* The lazy approach for now... */
7200 PCRTUINT256U pu256Src;
7201 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7202 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7203 if (rc == VINF_SUCCESS)
7204 {
7205 pu256Dst->au64[0] = pu256Src->au64[0];
7206 pu256Dst->au64[1] = pu256Src->au64[1];
7207 pu256Dst->au64[2] = pu256Src->au64[2];
7208 pu256Dst->au64[3] = pu256Src->au64[3];
7209 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7210 }
7211 return rc;
7212}
7213
7214
7215#ifdef IEM_WITH_SETJMP
7216/**
7217 * Fetches a data oword (octo word), generally AVX related.
7218 *
7219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7220 * @param pu256Dst Where to return the oword.
7221 * @param iSegReg The index of the segment register to use for
7222 * this access. The base and limits are checked.
7223 * @param GCPtrMem The address of the guest memory.
7224 */
7225void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7226{
7227 /* The lazy approach for now... */
7228 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7229 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7230 pu256Dst->au64[0] = pu256Src->au64[0];
7231 pu256Dst->au64[1] = pu256Src->au64[1];
7232 pu256Dst->au64[2] = pu256Src->au64[2];
7233 pu256Dst->au64[3] = pu256Src->au64[3];
7234 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7235}
7236#endif
7237
7238
7239/**
7240 * Fetches a data oword (octo word) at an aligned address, generally AVX
7241 * related.
7242 *
7243 * Raises \#GP(0) if not aligned.
7244 *
7245 * @returns Strict VBox status code.
7246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7247 * @param pu256Dst Where to return the oword.
7248 * @param iSegReg The index of the segment register to use for
7249 * this access. The base and limits are checked.
7250 * @param GCPtrMem The address of the guest memory.
7251 */
7252VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7253{
7254 /* The lazy approach for now... */
7255 PCRTUINT256U pu256Src;
7256 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7257 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7258 if (rc == VINF_SUCCESS)
7259 {
7260 pu256Dst->au64[0] = pu256Src->au64[0];
7261 pu256Dst->au64[1] = pu256Src->au64[1];
7262 pu256Dst->au64[2] = pu256Src->au64[2];
7263 pu256Dst->au64[3] = pu256Src->au64[3];
7264 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7265 }
7266 return rc;
7267}
7268
7269
7270#ifdef IEM_WITH_SETJMP
7271/**
7272 * Fetches a data oword (octo word) at an aligned address, generally AVX
7273 * related, longjmp on error.
7274 *
7275 * Raises \#GP(0) if not aligned.
7276 *
7277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7278 * @param pu256Dst Where to return the oword.
7279 * @param iSegReg The index of the segment register to use for
7280 * this access. The base and limits are checked.
7281 * @param GCPtrMem The address of the guest memory.
7282 */
7283void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7284{
7285 /* The lazy approach for now... */
7286 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7287 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7288 pu256Dst->au64[0] = pu256Src->au64[0];
7289 pu256Dst->au64[1] = pu256Src->au64[1];
7290 pu256Dst->au64[2] = pu256Src->au64[2];
7291 pu256Dst->au64[3] = pu256Src->au64[3];
7292 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7293}
7294#endif
7295
7296
7297
7298/**
7299 * Fetches a descriptor register (lgdt, lidt).
7300 *
7301 * @returns Strict VBox status code.
7302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7303 * @param pcbLimit Where to return the limit.
7304 * @param pGCPtrBase Where to return the base.
7305 * @param iSegReg The index of the segment register to use for
7306 * this access. The base and limits are checked.
7307 * @param GCPtrMem The address of the guest memory.
7308 * @param enmOpSize The effective operand size.
7309 */
7310VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7311 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7312{
7313 /*
7314 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7315 * little special:
7316 * - The two reads are done separately.
7317 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7318 * - We suspect the 386 to actually commit the limit before the base in
7319 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7320 * don't try to emulate this eccentric behavior, because it's not well
7321 * enough understood and rather hard to trigger.
7322 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7323 */
7324 VBOXSTRICTRC rcStrict;
7325 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7326 {
7327 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7328 if (rcStrict == VINF_SUCCESS)
7329 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7330 }
7331 else
7332 {
7333 uint32_t uTmp = 0; /* (silences a Visual C++ may-be-used-uninitialized warning) */
7334 if (enmOpSize == IEMMODE_32BIT)
7335 {
7336 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7337 {
7338 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7339 if (rcStrict == VINF_SUCCESS)
7340 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7341 }
7342 else
7343 {
7344 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7345 if (rcStrict == VINF_SUCCESS)
7346 {
7347 *pcbLimit = (uint16_t)uTmp;
7348 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7349 }
7350 }
7351 if (rcStrict == VINF_SUCCESS)
7352 *pGCPtrBase = uTmp;
7353 }
7354 else
7355 {
7356 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7357 if (rcStrict == VINF_SUCCESS)
7358 {
7359 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7360 if (rcStrict == VINF_SUCCESS)
7361 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7362 }
7363 }
7364 }
7365 return rcStrict;
7366}
7367
7368
7369
7370/**
7371 * Stores a data byte.
7372 *
7373 * @returns Strict VBox status code.
7374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7375 * @param iSegReg The index of the segment register to use for
7376 * this access. The base and limits are checked.
7377 * @param GCPtrMem The address of the guest memory.
7378 * @param u8Value The value to store.
7379 */
7380VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7381{
7382 /* The lazy approach for now... */
7383 uint8_t *pu8Dst;
7384 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7385 if (rc == VINF_SUCCESS)
7386 {
7387 *pu8Dst = u8Value;
7388 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7389 }
7390 return rc;
7391}
7392
7393
7394#ifdef IEM_WITH_SETJMP
7395/**
7396 * Stores a data byte, longjmp on error.
7397 *
7398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7399 * @param iSegReg The index of the segment register to use for
7400 * this access. The base and limits are checked.
7401 * @param GCPtrMem The address of the guest memory.
7402 * @param u8Value The value to store.
7403 */
7404void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7405{
7406 /* The lazy approach for now... */
7407 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7408 *pu8Dst = u8Value;
7409 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7410}
7411#endif
7412
7413
7414/**
7415 * Stores a data word.
7416 *
7417 * @returns Strict VBox status code.
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 * @param iSegReg The index of the segment register to use for
7420 * this access. The base and limits are checked.
7421 * @param GCPtrMem The address of the guest memory.
7422 * @param u16Value The value to store.
7423 */
7424VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7425{
7426 /* The lazy approach for now... */
7427 uint16_t *pu16Dst;
7428 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7429 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7430 if (rc == VINF_SUCCESS)
7431 {
7432 *pu16Dst = u16Value;
7433 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7434 }
7435 return rc;
7436}
7437
7438
7439#ifdef IEM_WITH_SETJMP
7440/**
7441 * Stores a data word, longjmp on error.
7442 *
7443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7444 * @param iSegReg The index of the segment register to use for
7445 * this access. The base and limits are checked.
7446 * @param GCPtrMem The address of the guest memory.
7447 * @param u16Value The value to store.
7448 */
7449void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7450{
7451 /* The lazy approach for now... */
7452 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7453 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7454 *pu16Dst = u16Value;
7455 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7456}
7457#endif
7458
7459
7460/**
7461 * Stores a data dword.
7462 *
7463 * @returns Strict VBox status code.
7464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7465 * @param iSegReg The index of the segment register to use for
7466 * this access. The base and limits are checked.
7467 * @param GCPtrMem The address of the guest memory.
7468 * @param u32Value The value to store.
7469 */
7470VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7471{
7472 /* The lazy approach for now... */
7473 uint32_t *pu32Dst;
7474 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7475 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7476 if (rc == VINF_SUCCESS)
7477 {
7478 *pu32Dst = u32Value;
7479 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7480 }
7481 return rc;
7482}
7483
7484
7485#ifdef IEM_WITH_SETJMP
7486/**
7487 * Stores a data dword, longjmp on error.
7488 *
7490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7491 * @param iSegReg The index of the segment register to use for
7492 * this access. The base and limits are checked.
7493 * @param GCPtrMem The address of the guest memory.
7494 * @param u32Value The value to store.
7495 */
7496void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7497{
7498 /* The lazy approach for now... */
7499 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7500 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7501 *pu32Dst = u32Value;
7502 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7503}
7504#endif
7505
7506
7507/**
7508 * Stores a data qword.
7509 *
7510 * @returns Strict VBox status code.
7511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7512 * @param iSegReg The index of the segment register to use for
7513 * this access. The base and limits are checked.
7514 * @param GCPtrMem The address of the guest memory.
7515 * @param u64Value The value to store.
7516 */
7517VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7518{
7519 /* The lazy approach for now... */
7520 uint64_t *pu64Dst;
7521 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7522 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7523 if (rc == VINF_SUCCESS)
7524 {
7525 *pu64Dst = u64Value;
7526 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7527 }
7528 return rc;
7529}
7530
7531
7532#ifdef IEM_WITH_SETJMP
7533/**
7534 * Stores a data qword, longjmp on error.
7535 *
7536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7537 * @param iSegReg The index of the segment register to use for
7538 * this access. The base and limits are checked.
7539 * @param GCPtrMem The address of the guest memory.
7540 * @param u64Value The value to store.
7541 */
7542void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7543{
7544 /* The lazy approach for now... */
7545 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7546 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7547 *pu64Dst = u64Value;
7548 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7549}
7550#endif
7551
7552
7553/**
7554 * Stores a data dqword.
7555 *
7556 * @returns Strict VBox status code.
7557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7558 * @param iSegReg The index of the segment register to use for
7559 * this access. The base and limits are checked.
7560 * @param GCPtrMem The address of the guest memory.
7561 * @param u128Value The value to store.
7562 */
7563VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7564{
7565 /* The lazy approach for now... */
7566 PRTUINT128U pu128Dst;
7567 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7568 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7569 if (rc == VINF_SUCCESS)
7570 {
7571 pu128Dst->au64[0] = u128Value.au64[0];
7572 pu128Dst->au64[1] = u128Value.au64[1];
7573 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7574 }
7575 return rc;
7576}
7577
7578
7579#ifdef IEM_WITH_SETJMP
7580/**
7581 * Stores a data dqword, longjmp on error.
7582 *
7583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7584 * @param iSegReg The index of the segment register to use for
7585 * this access. The base and limits are checked.
7586 * @param GCPtrMem The address of the guest memory.
7587 * @param u128Value The value to store.
7588 */
7589void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7590{
7591 /* The lazy approach for now... */
7592 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7593 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7594 pu128Dst->au64[0] = u128Value.au64[0];
7595 pu128Dst->au64[1] = u128Value.au64[1];
7596 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7597}
7598#endif
7599
7600
7601/**
7602 * Stores a data dqword, SSE aligned.
7603 *
7604 * @returns Strict VBox status code.
7605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7606 * @param iSegReg The index of the segment register to use for
7607 * this access. The base and limits are checked.
7608 * @param GCPtrMem The address of the guest memory.
7609 * @param u128Value The value to store.
7610 */
7611VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7612{
7613 /* The lazy approach for now... */
7614 PRTUINT128U pu128Dst;
7615 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7616 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7617 if (rc == VINF_SUCCESS)
7618 {
7619 pu128Dst->au64[0] = u128Value.au64[0];
7620 pu128Dst->au64[1] = u128Value.au64[1];
7621 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7622 }
7623 return rc;
7624}
7625
7626
7627#ifdef IEM_WITH_SETJMP
7628/**
7629 * Stores a data dqword, SSE aligned, longjmp on error.
7630 *
7632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7633 * @param iSegReg The index of the segment register to use for
7634 * this access. The base and limits are checked.
7635 * @param GCPtrMem The address of the guest memory.
7636 * @param u128Value The value to store.
7637 */
7638void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7639{
7640 /* The lazy approach for now... */
7641 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7642 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7643 pu128Dst->au64[0] = u128Value.au64[0];
7644 pu128Dst->au64[1] = u128Value.au64[1];
7645 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7646}
7647#endif
7648
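/*
 * Sketch (not built): the difference between the plain and the SSE aligned
 * 128-bit stores is only the last iemMemMap argument.  The plain variant
 * passes 0 (NO_AC), while the aligned variant passes
 * (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE
 * so that a misaligned MOVAPS-style store raises #GP(0).  The address and
 * values below are made up for illustration.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC iemExampleAlignedSseStoreSketch(PVMCPUCC pVCpu)
{
    RTUINT128U uValue;
    uValue.au64[0] = UINT64_C(0x1122334455667788);
    uValue.au64[1] = UINT64_C(0x99aabbccddeeff00);
    /* 0x1008 is not 16 byte aligned, so this is expected to raise #GP(0). */
    return iemMemStoreDataU128AlignedSse(pVCpu, X86_SREG_DS, 0x1008, uValue);
}
#endif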
7649
7650/**
7651 * Stores a data qqword (256 bits).
7652 *
7653 * @returns Strict VBox status code.
7654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7655 * @param iSegReg The index of the segment register to use for
7656 * this access. The base and limits are checked.
7657 * @param GCPtrMem The address of the guest memory.
7658 * @param pu256Value Pointer to the value to store.
7659 */
7660VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7661{
7662 /* The lazy approach for now... */
7663 PRTUINT256U pu256Dst;
7664 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7665 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7666 if (rc == VINF_SUCCESS)
7667 {
7668 pu256Dst->au64[0] = pu256Value->au64[0];
7669 pu256Dst->au64[1] = pu256Value->au64[1];
7670 pu256Dst->au64[2] = pu256Value->au64[2];
7671 pu256Dst->au64[3] = pu256Value->au64[3];
7672 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7673 }
7674 return rc;
7675}
7676
7677
7678#ifdef IEM_WITH_SETJMP
7679/**
7680 * Stores a data qqword (256 bits), longjmp on error.
7681 *
7682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7683 * @param iSegReg The index of the segment register to use for
7684 * this access. The base and limits are checked.
7685 * @param GCPtrMem The address of the guest memory.
7686 * @param pu256Value Pointer to the value to store.
7687 */
7688void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7689{
7690 /* The lazy approach for now... */
7691 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7692 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7693 pu256Dst->au64[0] = pu256Value->au64[0];
7694 pu256Dst->au64[1] = pu256Value->au64[1];
7695 pu256Dst->au64[2] = pu256Value->au64[2];
7696 pu256Dst->au64[3] = pu256Value->au64[3];
7697 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7698}
7699#endif
7700
7701
7702/**
7703 * Stores a data qqword (256 bits), AVX \#GP(0) aligned.
7704 *
7705 * @returns Strict VBox status code.
7706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7707 * @param iSegReg The index of the segment register to use for
7708 * this access. The base and limits are checked.
7709 * @param GCPtrMem The address of the guest memory.
7710 * @param pu256Value Pointer to the value to store.
7711 */
7712VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7713{
7714 /* The lazy approach for now... */
7715 PRTUINT256U pu256Dst;
7716 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7717 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7718 if (rc == VINF_SUCCESS)
7719 {
7720 pu256Dst->au64[0] = pu256Value->au64[0];
7721 pu256Dst->au64[1] = pu256Value->au64[1];
7722 pu256Dst->au64[2] = pu256Value->au64[2];
7723 pu256Dst->au64[3] = pu256Value->au64[3];
7724 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7725 }
7726 return rc;
7727}
7728
7729
7730#ifdef IEM_WITH_SETJMP
7731/**
7732 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7733 *
7735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7736 * @param iSegReg The index of the segment register to use for
7737 * this access. The base and limits are checked.
7738 * @param GCPtrMem The address of the guest memory.
7739 * @param pu256Value Pointer to the value to store.
7740 */
7741void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7742{
7743 /* The lazy approach for now... */
7744 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7745 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7746 pu256Dst->au64[0] = pu256Value->au64[0];
7747 pu256Dst->au64[1] = pu256Value->au64[1];
7748 pu256Dst->au64[2] = pu256Value->au64[2];
7749 pu256Dst->au64[3] = pu256Value->au64[3];
7750 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7751}
7752#endif
7753
7754
7755/**
7756 * Stores a descriptor register (sgdt, sidt).
7757 *
7758 * @returns Strict VBox status code.
7759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7760 * @param cbLimit The limit.
7761 * @param GCPtrBase The base address.
7762 * @param iSegReg The index of the segment register to use for
7763 * this access. The base and limits are checked.
7764 * @param GCPtrMem The address of the guest memory.
7765 */
7766VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7767{
7768 /*
7769 * The SIDT and SGDT instructions actually store the data using two
7770 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7771 * do not respond to operand size prefixes.
7772 */
7773 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7774 if (rcStrict == VINF_SUCCESS)
7775 {
7776 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7777 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7778 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7779 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7780 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7781 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7782 else
7783 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7784 }
7785 return rcStrict;
7786}
7787
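/*
 * Sketch (not built): the helper above writes the 16-bit limit at GCPtrMem
 * and the base at GCPtrMem + 2, sized by the CPU mode (dword for 16/32-bit,
 * qword for 64-bit); in 16-bit mode on 286-class and earlier targets the
 * top base byte is forced to 0xff.  The values below are made up.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC iemExampleStoreGdtrSketch(PVMCPUCC pVCpu, RTGCPTR GCPtrMem)
{
    uint16_t const cbLimit   = 0x97;                 /* hypothetical GDT limit */
    RTGCPTR  const GCPtrBase = UINT32_C(0x00203000); /* hypothetical GDT base  */
    return iemMemStoreDataXdtr(pVCpu, cbLimit, GCPtrBase, X86_SREG_DS, GCPtrMem);
}
#endif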
7788
7789/**
7790 * Pushes a word onto the stack.
7791 *
7792 * @returns Strict VBox status code.
7793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7794 * @param u16Value The value to push.
7795 */
7796VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7797{
7798 /* Decrement the stack pointer. */
7799 uint64_t uNewRsp;
7800 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7801
7802 /* Write the word the lazy way. */
7803 uint16_t *pu16Dst;
7804 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7805 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7806 if (rc == VINF_SUCCESS)
7807 {
7808 *pu16Dst = u16Value;
7809 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7810 }
7811
7812 /* Commit the new RSP value unless an access handler made trouble. */
7813 if (rc == VINF_SUCCESS)
7814 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7815
7816 return rc;
7817}
7818
7819
7820/**
7821 * Pushes a dword onto the stack.
7822 *
7823 * @returns Strict VBox status code.
7824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7825 * @param u32Value The value to push.
7826 */
7827VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7828{
7829 /* Decrement the stack pointer. */
7830 uint64_t uNewRsp;
7831 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7832
7833 /* Write the dword the lazy way. */
7834 uint32_t *pu32Dst;
7835 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7836 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7837 if (rc == VINF_SUCCESS)
7838 {
7839 *pu32Dst = u32Value;
7840 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7841 }
7842
7843 /* Commit the new RSP value unless an access handler made trouble. */
7844 if (rc == VINF_SUCCESS)
7845 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7846
7847 return rc;
7848}
7849
7850
7851/**
7852 * Pushes a dword segment register value onto the stack.
7853 *
7854 * @returns Strict VBox status code.
7855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7856 * @param u32Value The value to push.
7857 */
7858VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7859{
7860 /* Decrement the stack pointer. */
7861 uint64_t uNewRsp;
7862 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7863
7864 /* The Intel docs talk about zero extending the selector register
7865 value. My actual Intel CPU here might be zero extending the value,
7866 but it still only writes the lower word... */
7867 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7868 * happens when crossing an electric page boundary: is the high word checked
7869 * for write accessibility or not? Probably it is. What about segment limits?
7870 * It appears this behavior is also shared with trap error codes.
7871 *
7872 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
7873 * Check on ancient hardware to find out when it actually changed. */
7874 uint16_t *pu16Dst;
7875 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
7876 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
7877 if (rc == VINF_SUCCESS)
7878 {
7879 *pu16Dst = (uint16_t)u32Value;
7880 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7881 }
7882
7883 /* Commit the new RSP value unless an access handler made trouble. */
7884 if (rc == VINF_SUCCESS)
7885 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7886
7887 return rc;
7888}
7889
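/*
 * Sketch (not built): despite reserving a full dword stack slot (note the
 * sizeof(uint32_t) passed to iemMemMap above), only the low word of the
 * selector value is written, while RSP still moves by 4.  The selector
 * value below is made up for illustration.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC iemExamplePushSRegSketch(PVMCPUCC pVCpu)
{
    /* RSP is adjusted by 4; memory only receives the low 16 bits. */
    return iemMemStackPushU32SReg(pVCpu, 0x0023 /* hypothetical selector */);
}
#endif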
7890
7891/**
7892 * Pushes a qword onto the stack.
7893 *
7894 * @returns Strict VBox status code.
7895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7896 * @param u64Value The value to push.
7897 */
7898VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7899{
7900 /* Decrement the stack pointer. */
7901 uint64_t uNewRsp;
7902 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7903
7904 /* Write the qword the lazy way. */
7905 uint64_t *pu64Dst;
7906 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7907 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7908 if (rc == VINF_SUCCESS)
7909 {
7910 *pu64Dst = u64Value;
7911 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7912 }
7913
7914 /* Commit the new RSP value unless an access handler made trouble. */
7915 if (rc == VINF_SUCCESS)
7916 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7917
7918 return rc;
7919}
7920
7921
7922/**
7923 * Pops a word from the stack.
7924 *
7925 * @returns Strict VBox status code.
7926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7927 * @param pu16Value Where to store the popped value.
7928 */
7929VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7930{
7931 /* Increment the stack pointer. */
7932 uint64_t uNewRsp;
7933 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7934
7935 /* Fetch the word the lazy way. */
7936 uint16_t const *pu16Src;
7937 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
7938 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
7939 if (rc == VINF_SUCCESS)
7940 {
7941 *pu16Value = *pu16Src;
7942 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7943
7944 /* Commit the new RSP value. */
7945 if (rc == VINF_SUCCESS)
7946 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7947 }
7948
7949 return rc;
7950}
7951
7952
7953/**
7954 * Pops a dword from the stack.
7955 *
7956 * @returns Strict VBox status code.
7957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7958 * @param pu32Value Where to store the popped value.
7959 */
7960VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7961{
7962 /* Increment the stack pointer. */
7963 uint64_t uNewRsp;
7964 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7965
7966 /* Fetch the dword the lazy way. */
7967 uint32_t const *pu32Src;
7968 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
7969 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
7970 if (rc == VINF_SUCCESS)
7971 {
7972 *pu32Value = *pu32Src;
7973 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7974
7975 /* Commit the new RSP value. */
7976 if (rc == VINF_SUCCESS)
7977 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7978 }
7979
7980 return rc;
7981}
7982
7983
7984/**
7985 * Pops a qword from the stack.
7986 *
7987 * @returns Strict VBox status code.
7988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7989 * @param pu64Value Where to store the popped value.
7990 */
7991VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7992{
7993 /* Increment the stack pointer. */
7994 uint64_t uNewRsp;
7995 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
7996
7997 /* Fetch the qword the lazy way. */
7998 uint64_t const *pu64Src;
7999 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8000 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8001 if (rc == VINF_SUCCESS)
8002 {
8003 *pu64Value = *pu64Src;
8004 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8005
8006 /* Commit the new RSP value. */
8007 if (rc == VINF_SUCCESS)
8008 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8009 }
8010
8011 return rc;
8012}
8013
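/*
 * Sketch (not built): a push/pop round trip using the helpers above.  RSP is
 * only committed when both the mapping and the commit-and-unmap succeed, so
 * a faulting access leaves the guest stack pointer untouched.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC iemExamplePushPopRoundTrip(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, UINT64_C(0xdeadbeefcafebabe));
    if (rcStrict == VINF_SUCCESS)
    {
        uint64_t uPopped = 0;
        rcStrict = iemMemStackPopU64(pVCpu, &uPopped);
        Assert(rcStrict != VINF_SUCCESS || uPopped == UINT64_C(0xdeadbeefcafebabe));
    }
    return rcStrict;
}
#endif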
8014
8015/**
8016 * Pushes a word onto the stack, using a temporary stack pointer.
8017 *
8018 * @returns Strict VBox status code.
8019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8020 * @param u16Value The value to push.
8021 * @param pTmpRsp Pointer to the temporary stack pointer.
8022 */
8023VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8024{
8025 /* Decrement the stack pointer. */
8026 RTUINT64U NewRsp = *pTmpRsp;
8027 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8028
8029 /* Write the word the lazy way. */
8030 uint16_t *pu16Dst;
8031 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8032 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8033 if (rc == VINF_SUCCESS)
8034 {
8035 *pu16Dst = u16Value;
8036 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8037 }
8038
8039 /* Commit the new RSP value unless an access handler made trouble. */
8040 if (rc == VINF_SUCCESS)
8041 *pTmpRsp = NewRsp;
8042
8043 return rc;
8044}
8045
8046
8047/**
8048 * Pushes a dword onto the stack, using a temporary stack pointer.
8049 *
8050 * @returns Strict VBox status code.
8051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8052 * @param u32Value The value to push.
8053 * @param pTmpRsp Pointer to the temporary stack pointer.
8054 */
8055VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8056{
8057 /* Decrement the stack pointer. */
8058 RTUINT64U NewRsp = *pTmpRsp;
8059 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8060
8061 /* Write the dword the lazy way. */
8062 uint32_t *pu32Dst;
8063 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8064 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8065 if (rc == VINF_SUCCESS)
8066 {
8067 *pu32Dst = u32Value;
8068 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8069 }
8070
8071 /* Commit the new RSP value unless an access handler made trouble. */
8072 if (rc == VINF_SUCCESS)
8073 *pTmpRsp = NewRsp;
8074
8075 return rc;
8076}
8077
8078
8079/**
8080 * Pushes a qword onto the stack, using a temporary stack pointer.
8081 *
8082 * @returns Strict VBox status code.
8083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8084 * @param u64Value The value to push.
8085 * @param pTmpRsp Pointer to the temporary stack pointer.
8086 */
8087VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8088{
8089 /* Decrement the stack pointer. */
8090 RTUINT64U NewRsp = *pTmpRsp;
8091 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8092
8093 /* Write the qword the lazy way. */
8094 uint64_t *pu64Dst;
8095 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8096 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8097 if (rc == VINF_SUCCESS)
8098 {
8099 *pu64Dst = u64Value;
8100 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8101 }
8102
8103 /* Commit the new RSP value unless an access handler made trouble. */
8104 if (rc == VINF_SUCCESS)
8105 *pTmpRsp = NewRsp;
8106
8107 return rc;
8108}
8109
8110
8111/**
8112 * Pops a word from the stack, using a temporary stack pointer.
8113 *
8114 * @returns Strict VBox status code.
8115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8116 * @param pu16Value Where to store the popped value.
8117 * @param pTmpRsp Pointer to the temporary stack pointer.
8118 */
8119VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8120{
8121 /* Increment the stack pointer. */
8122 RTUINT64U NewRsp = *pTmpRsp;
8123 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8124
8125 /* Fetch the word the lazy way. */
8126 uint16_t const *pu16Src;
8127 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8128 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8129 if (rc == VINF_SUCCESS)
8130 {
8131 *pu16Value = *pu16Src;
8132 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8133
8134 /* Commit the new RSP value. */
8135 if (rc == VINF_SUCCESS)
8136 *pTmpRsp = NewRsp;
8137 }
8138
8139 return rc;
8140}
8141
8142
8143/**
8144 * Pops a dword from the stack, using a temporary stack pointer.
8145 *
8146 * @returns Strict VBox status code.
8147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8148 * @param pu32Value Where to store the popped value.
8149 * @param pTmpRsp Pointer to the temporary stack pointer.
8150 */
8151VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8152{
8153 /* Increment the stack pointer. */
8154 RTUINT64U NewRsp = *pTmpRsp;
8155 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8156
8157 /* Fetch the dword the lazy way. */
8158 uint32_t const *pu32Src;
8159 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8160 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8161 if (rc == VINF_SUCCESS)
8162 {
8163 *pu32Value = *pu32Src;
8164 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8165
8166 /* Commit the new RSP value. */
8167 if (rc == VINF_SUCCESS)
8168 *pTmpRsp = NewRsp;
8169 }
8170
8171 return rc;
8172}
8173
8174
8175/**
8176 * Pops a qword from the stack, using a temporary stack pointer.
8177 *
8178 * @returns Strict VBox status code.
8179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8180 * @param pu64Value Where to store the popped value.
8181 * @param pTmpRsp Pointer to the temporary stack pointer.
8182 */
8183VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8184{
8185 /* Increment the stack pointer. */
8186 RTUINT64U NewRsp = *pTmpRsp;
8187 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8188
8189 /* Fetch the qword the lazy way. */
8190 uint64_t const *pu64Src;
8191 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8192 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8193 if (rcStrict == VINF_SUCCESS)
8194 {
8195 *pu64Value = *pu64Src;
8196 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8197
8198 /* Commit the new RSP value. */
8199 if (rcStrict == VINF_SUCCESS)
8200 *pTmpRsp = NewRsp;
8201 }
8202
8203 return rcStrict;
8204}
8205
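/*
 * Sketch (not built): the *Ex variants work on a caller supplied RTUINT64U
 * stack pointer instead of the guest RSP, so a sequence of pushes/pops can
 * be staged and only committed to CPUMCTX::rsp once everything has worked.
 * The values below are made up for illustration.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC iemExampleTmpRspSketch(PVMCPUCC pVCpu)
{
    RTUINT64U TmpRsp;
    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;

    VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, 0x1234, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16Ex(pVCpu, 0x5678, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = TmpRsp.u; /* commit only on full success */
    return rcStrict;
}
#endif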
8206
8207/**
8208 * Begin a special stack push (used by interrupts, exceptions and such).
8209 *
8210 * This will raise \#SS or \#PF if appropriate.
8211 *
8212 * @returns Strict VBox status code.
8213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8214 * @param cbMem The number of bytes to push onto the stack.
8215 * @param cbAlign The alignment mask (7, 3, 1).
8216 * @param ppvMem Where to return the pointer to the stack memory.
8217 * As with the other memory functions this could be
8218 * direct access or bounce buffered access, so
8219 * don't commit the register state until the commit call
8220 * succeeds.
8221 * @param puNewRsp Where to return the new RSP value. This must be
8222 * passed unchanged to
8223 * iemMemStackPushCommitSpecial().
8224 */
8225VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8226 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8227{
8228 Assert(cbMem < UINT8_MAX);
8229 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8230 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8231 IEM_ACCESS_STACK_W, cbAlign);
8232}
8233
8234
8235/**
8236 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8237 *
8238 * This will update the rSP.
8239 *
8240 * @returns Strict VBox status code.
8241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8242 * @param pvMem The pointer returned by
8243 * iemMemStackPushBeginSpecial().
8244 * @param uNewRsp The new RSP value returned by
8245 * iemMemStackPushBeginSpecial().
8246 */
8247VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8248{
8249 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8250 if (rcStrict == VINF_SUCCESS)
8251 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8252 return rcStrict;
8253}
8254
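/*
 * Sketch (not built): the begin/commit pair above is used to push a whole
 * frame in one go.  The caller maps the stack area, fills it in, and only
 * then commits both the memory and the new RSP.  The frame size, alignment
 * and contents below are made up for illustration.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC iemExampleSpecialPushSketch(PVMCPUCC pVCpu)
{
    uint64_t  uNewRsp    = 0;
    uint32_t *pau32Frame = NULL;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t), 3 /*cbAlign*/,
                                                        (void **)&pau32Frame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        pau32Frame[0] = 0x11111111; /* frame dword 0 */
        pau32Frame[1] = 0x22222222; /* frame dword 1 */
        pau32Frame[2] = 0x33333333; /* frame dword 2 */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
    }
    return rcStrict;
}
#endif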
8255
8256/**
8257 * Begin a special stack pop (used by iret, retf and such).
8258 *
8259 * This will raise \#SS or \#PF if appropriate.
8260 *
8261 * @returns Strict VBox status code.
8262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8263 * @param cbMem The number of bytes to pop from the stack.
8264 * @param cbAlign The alignment mask (7, 3, 1).
8265 * @param ppvMem Where to return the pointer to the stack memory.
8266 * @param puNewRsp Where to return the new RSP value. This must be
8267 * assigned to CPUMCTX::rsp manually some time
8268 * after iemMemStackPopDoneSpecial() has been
8269 * called.
8270 */
8271VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8272 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8273{
8274 Assert(cbMem < UINT8_MAX);
8275 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8276 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8277}
8278
8279
8280/**
8281 * Continue a special stack pop (used by iret and retf), for the purpose of
8282 * retrieving a new stack pointer.
8283 *
8284 * This will raise \#SS or \#PF if appropriate.
8285 *
8286 * @returns Strict VBox status code.
8287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8288 * @param off Offset from the top of the stack. This is zero
8289 * except in the retf case.
8290 * @param cbMem The number of bytes to pop from the stack.
8291 * @param ppvMem Where to return the pointer to the stack memory.
8292 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8293 * return this because all uses of this function are
8294 * to retrieve a new value and anything we return
8295 * here would be discarded.)
8296 */
8297VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8298 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8299{
8300 Assert(cbMem < UINT8_MAX);
8301
8302 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8303 RTGCPTR GCPtrTop;
8304 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8305 GCPtrTop = uCurNewRsp;
8306 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8307 GCPtrTop = (uint32_t)uCurNewRsp;
8308 else
8309 GCPtrTop = (uint16_t)uCurNewRsp;
8310
8311 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8312 0 /* checked in iemMemStackPopBeginSpecial */);
8313}
8314
8315
8316/**
8317 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8318 * iemMemStackPopContinueSpecial).
8319 *
8320 * The caller will manually commit the rSP.
8321 *
8322 * @returns Strict VBox status code.
8323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8324 * @param pvMem The pointer returned by
8325 * iemMemStackPopBeginSpecial() or
8326 * iemMemStackPopContinueSpecial().
8327 */
8328VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8329{
8330 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8331}
8332
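/*
 * Sketch (not built): the special pop helpers are used together roughly like
 * this for an iret/retf style frame - begin the pop, read the values out of
 * the returned mapping, call the done helper, and only then assign RSP from
 * the value the begin call produced.  The sizes and values are made up.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC iemExampleSpecialPopSketch(PVMCPUCC pVCpu)
{
    uint64_t        uNewRsp    = 0;
    uint32_t const *pau32Frame = NULL;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 2 * sizeof(uint32_t), 3 /*cbAlign*/,
                                                       (void const **)&pau32Frame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        uint32_t const uValue0 = pau32Frame[0];
        uint32_t const uValue1 = pau32Frame[1];
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
        if (rcStrict == VINF_SUCCESS)
        {
            pVCpu->cpum.GstCtx.rsp = uNewRsp; /* the caller commits RSP manually */
            RT_NOREF(uValue0, uValue1);
        }
    }
    return rcStrict;
}
#endif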
8333
8334/**
8335 * Fetches a system table byte.
8336 *
8337 * @returns Strict VBox status code.
8338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8339 * @param pbDst Where to return the byte.
8340 * @param iSegReg The index of the segment register to use for
8341 * this access. The base and limits are checked.
8342 * @param GCPtrMem The address of the guest memory.
8343 */
8344VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8345{
8346 /* The lazy approach for now... */
8347 uint8_t const *pbSrc;
8348 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8349 if (rc == VINF_SUCCESS)
8350 {
8351 *pbDst = *pbSrc;
8352 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8353 }
8354 return rc;
8355}
8356
8357
8358/**
8359 * Fetches a system table word.
8360 *
8361 * @returns Strict VBox status code.
8362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8363 * @param pu16Dst Where to return the word.
8364 * @param iSegReg The index of the segment register to use for
8365 * this access. The base and limits are checked.
8366 * @param GCPtrMem The address of the guest memory.
8367 */
8368VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8369{
8370 /* The lazy approach for now... */
8371 uint16_t const *pu16Src;
8372 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8373 if (rc == VINF_SUCCESS)
8374 {
8375 *pu16Dst = *pu16Src;
8376 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8377 }
8378 return rc;
8379}
8380
8381
8382/**
8383 * Fetches a system table dword.
8384 *
8385 * @returns Strict VBox status code.
8386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8387 * @param pu32Dst Where to return the dword.
8388 * @param iSegReg The index of the segment register to use for
8389 * this access. The base and limits are checked.
8390 * @param GCPtrMem The address of the guest memory.
8391 */
8392VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8393{
8394 /* The lazy approach for now... */
8395 uint32_t const *pu32Src;
8396 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8397 if (rc == VINF_SUCCESS)
8398 {
8399 *pu32Dst = *pu32Src;
8400 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8401 }
8402 return rc;
8403}
8404
8405
8406/**
8407 * Fetches a system table qword.
8408 *
8409 * @returns Strict VBox status code.
8410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8411 * @param pu64Dst Where to return the qword.
8412 * @param iSegReg The index of the segment register to use for
8413 * this access. The base and limits are checked.
8414 * @param GCPtrMem The address of the guest memory.
8415 */
8416VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8417{
8418 /* The lazy approach for now... */
8419 uint64_t const *pu64Src;
8420 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8421 if (rc == VINF_SUCCESS)
8422 {
8423 *pu64Dst = *pu64Src;
8424 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8425 }
8426 return rc;
8427}
8428
8429
8430/**
8431 * Fetches a descriptor table entry with caller specified error code.
8432 *
8433 * @returns Strict VBox status code.
8434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8435 * @param pDesc Where to return the descriptor table entry.
8436 * @param uSel The selector which table entry to fetch.
8437 * @param uXcpt The exception to raise on table lookup error.
8438 * @param uErrorCode The error code associated with the exception.
8439 */
8440static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8441 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8442{
8443 AssertPtr(pDesc);
8444 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8445
8446 /** @todo did the 286 require all 8 bytes to be accessible? */
8447 /*
8448 * Get the selector table base and check bounds.
8449 */
8450 RTGCPTR GCPtrBase;
8451 if (uSel & X86_SEL_LDT)
8452 {
8453 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8454 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8455 {
8456 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8457 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8458 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8459 uErrorCode, 0);
8460 }
8461
8462 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8463 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8464 }
8465 else
8466 {
8467 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8468 {
8469 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8470 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8471 uErrorCode, 0);
8472 }
8473 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8474 }
8475
8476 /*
8477 * Read the legacy descriptor and maybe the long mode extensions if
8478 * required.
8479 */
8480 VBOXSTRICTRC rcStrict;
8481 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8482 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8483 else
8484 {
8485 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8486 if (rcStrict == VINF_SUCCESS)
8487 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8488 if (rcStrict == VINF_SUCCESS)
8489 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8490 if (rcStrict == VINF_SUCCESS)
8491 pDesc->Legacy.au16[3] = 0;
8492 else
8493 return rcStrict;
8494 }
8495
8496 if (rcStrict == VINF_SUCCESS)
8497 {
8498 if ( !IEM_IS_LONG_MODE(pVCpu)
8499 || pDesc->Legacy.Gen.u1DescType)
8500 pDesc->Long.au64[1] = 0;
8501 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8502 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8503 else
8504 {
8505 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8506 /** @todo is this the right exception? */
8507 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8508 }
8509 }
8510 return rcStrict;
8511}
8512
8513
8514/**
8515 * Fetches a descriptor table entry.
8516 *
8517 * @returns Strict VBox status code.
8518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8519 * @param pDesc Where to return the descriptor table entry.
8520 * @param uSel The selector which table entry to fetch.
8521 * @param uXcpt The exception to raise on table lookup error.
8522 */
8523VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8524{
8525 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8526}
8527
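/*
 * Worked example (selector value made up): for uSel = 0x002b the descriptor
 * offset is uSel & X86_SEL_MASK = 0x0028, and the bounds check above requires
 * (uSel | X86_SEL_RPL_LDT) = 0x002f to be within the table limit, i.e. the
 * whole 8-byte entry at 0x0028..0x002f must fit.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC iemExampleFetchSelDescSketch(PVMCPUCC pVCpu)
{
    IEMSELDESC Desc;
    /* Fetch the GDT entry for a hypothetical selector, raising #GP on lookup error. */
    return iemMemFetchSelDesc(pVCpu, &Desc, 0x002b, X86_XCPT_GP);
}
#endif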
8528
8529/**
8530 * Marks the selector descriptor as accessed (only non-system descriptors).
8531 *
8532 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8533 * will therefore skip the limit checks.
8534 *
8535 * @returns Strict VBox status code.
8536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8537 * @param uSel The selector.
8538 */
8539VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8540{
8541 /*
8542 * Get the selector table base and calculate the entry address.
8543 */
8544 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8545 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8546 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8547 GCPtr += uSel & X86_SEL_MASK;
8548
8549 /*
8550 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8551 * ugly stuff to avoid this. This will make sure it's an atomic access
8552 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8553 */
8554 VBOXSTRICTRC rcStrict;
8555 uint32_t volatile *pu32;
8556 if ((GCPtr & 3) == 0)
8557 {
8558 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8559 GCPtr += 2 + 2;
8560 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8561 if (rcStrict != VINF_SUCCESS)
8562 return rcStrict;
8563 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8564 }
8565 else
8566 {
8567 /* The misaligned GDT/LDT case, map the whole thing. */
8568 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8569 if (rcStrict != VINF_SUCCESS)
8570 return rcStrict;
8571 switch ((uintptr_t)pu32 & 3)
8572 {
8573 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8574 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8575 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8576 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8577 }
8578 }
8579
8580 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8581}
8582
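/*
 * Worked example: the accessed bit is bit 40 of the 8-byte descriptor, i.e.
 * bit 0 of the type byte (byte 5).  The aligned path above maps bytes 4..7
 * and sets bit 8 of that dword (40 - 32 = 8); the misaligned path adjusts
 * the byte pointer and bit index so the same bit is hit atomically.  The
 * non-atomic sketch below shows the equivalent plain byte operation.
 */
#if 0 /* example only, not compiled */
static void iemExampleAccessedBitSketch(uint8_t volatile *pabDesc)
{
    /* Equivalent of what the code above achieves, minus the atomicity. */
    pabDesc[5] |= X86_SEL_TYPE_ACCESSED;
}
#endif
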
8583/** @} */
8584
8585/** @name Opcode Helpers.
8586 * @{
8587 */
8588
8589/**
8590 * Calculates the effective address of a ModR/M memory operand.
8591 *
8592 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8593 *
8594 * @return Strict VBox status code.
8595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8596 * @param bRm The ModRM byte.
8597 * @param cbImm The size of any immediate following the
8598 * effective address opcode bytes. Important for
8599 * RIP relative addressing.
8600 * @param pGCPtrEff Where to return the effective address.
8601 */
8602VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8603{
8604 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8605# define SET_SS_DEF() \
8606 do \
8607 { \
8608 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8609 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8610 } while (0)
8611
8612 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8613 {
8614/** @todo Check the effective address size crap! */
8615 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8616 {
8617 uint16_t u16EffAddr;
8618
8619 /* Handle the disp16 form with no registers first. */
8620 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8621 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8622 else
8623 {
8624 /* Get the displacement. */
8625 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8626 {
8627 case 0: u16EffAddr = 0; break;
8628 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8629 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8630 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8631 }
8632
8633 /* Add the base and index registers to the disp. */
8634 switch (bRm & X86_MODRM_RM_MASK)
8635 {
8636 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8637 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8638 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8639 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8640 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8641 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8642 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8643 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8644 }
8645 }
8646
8647 *pGCPtrEff = u16EffAddr;
8648 }
8649 else
8650 {
8651 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8652 uint32_t u32EffAddr;
8653
8654 /* Handle the disp32 form with no registers first. */
8655 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8656 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8657 else
8658 {
8659 /* Get the register (or SIB) value. */
8660 switch ((bRm & X86_MODRM_RM_MASK))
8661 {
8662 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8663 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8664 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8665 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8666 case 4: /* SIB */
8667 {
8668 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8669
8670 /* Get the index and scale it. */
8671 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8672 {
8673 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8674 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8675 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8676 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8677 case 4: u32EffAddr = 0; /*none */ break;
8678 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8679 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8680 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8681 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8682 }
8683 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8684
8685 /* add base */
8686 switch (bSib & X86_SIB_BASE_MASK)
8687 {
8688 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8689 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8690 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8691 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8692 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8693 case 5:
8694 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8695 {
8696 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8697 SET_SS_DEF();
8698 }
8699 else
8700 {
8701 uint32_t u32Disp;
8702 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8703 u32EffAddr += u32Disp;
8704 }
8705 break;
8706 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8707 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8708 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8709 }
8710 break;
8711 }
8712 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8713 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8714 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8715 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8716 }
8717
8718 /* Get and add the displacement. */
8719 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8720 {
8721 case 0:
8722 break;
8723 case 1:
8724 {
8725 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8726 u32EffAddr += i8Disp;
8727 break;
8728 }
8729 case 2:
8730 {
8731 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8732 u32EffAddr += u32Disp;
8733 break;
8734 }
8735 default:
8736 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8737 }
8738
8739 }
8740 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8741 *pGCPtrEff = u32EffAddr;
8742 else
8743 {
8744 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8745 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8746 }
8747 }
8748 }
8749 else
8750 {
8751 uint64_t u64EffAddr;
8752
8753 /* Handle the rip+disp32 form with no registers first. */
8754 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8755 {
8756 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8757 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8758 }
8759 else
8760 {
8761 /* Get the register (or SIB) value. */
8762 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8763 {
8764 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8765 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8766 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8767 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8768 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8769 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8770 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8771 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8772 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8773 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8774 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8775 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8776 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8777 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8778 /* SIB */
8779 case 4:
8780 case 12:
8781 {
8782 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8783
8784 /* Get the index and scale it. */
8785 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8786 {
8787 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8788 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8789 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8790 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8791 case 4: u64EffAddr = 0; /*none */ break;
8792 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8793 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8794 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8795 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8796 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8797 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8798 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8799 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8800 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8801 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8802 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8803 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8804 }
8805 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8806
8807 /* add base */
8808 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8809 {
8810 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8811 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8812 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8813 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8814 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8815 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8816 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8817 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8818 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8819 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8820 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8821 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8822 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8823 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8824 /* complicated encodings */
8825 case 5:
8826 case 13:
8827 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8828 {
8829 if (!pVCpu->iem.s.uRexB)
8830 {
8831 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8832 SET_SS_DEF();
8833 }
8834 else
8835 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8836 }
8837 else
8838 {
8839 uint32_t u32Disp;
8840 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8841 u64EffAddr += (int32_t)u32Disp;
8842 }
8843 break;
8844 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8845 }
8846 break;
8847 }
8848 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8849 }
8850
8851 /* Get and add the displacement. */
8852 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8853 {
8854 case 0:
8855 break;
8856 case 1:
8857 {
8858 int8_t i8Disp;
8859 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8860 u64EffAddr += i8Disp;
8861 break;
8862 }
8863 case 2:
8864 {
8865 uint32_t u32Disp;
8866 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8867 u64EffAddr += (int32_t)u32Disp;
8868 break;
8869 }
8870 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8871 }
8872
8873 }
8874
8875 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8876 *pGCPtrEff = u64EffAddr;
8877 else
8878 {
8879 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8880 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8881 }
8882 }
8883
8884 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8885 return VINF_SUCCESS;
8886}
8887
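/*
 * Worked example (encoding made up): in 16-bit addressing, bRm = 0x42 decodes
 * as mod=1, reg=0, rm=2, i.e. [BP+SI+disp8] with SS as the default segment,
 * which is exactly the rm==2 + SET_SS_DEF() path above.  The sketch feeds it
 * through the helper; the disp8 byte is consumed from the opcode stream.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC iemExampleCalcEffAddrSketch(PVMCPUCC pVCpu)
{
    RTGCPTR GCPtrEff = 0;
    /* No immediate follows the ModR/M bytes in this hypothetical encoding. */
    return iemOpHlpCalcRmEffAddr(pVCpu, 0x42 /*bRm*/, 0 /*cbImm*/, &GCPtrEff);
}
#endif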
8888
8889/**
8890 * Calculates the effective address of a ModR/M memory operand.
8891 *
8892 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8893 *
8894 * @return Strict VBox status code.
8895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8896 * @param bRm The ModRM byte.
8897 * @param cbImm The size of any immediate following the
8898 * effective address opcode bytes. Important for
8899 * RIP relative addressing.
8900 * @param pGCPtrEff Where to return the effective address.
8901 * @param offRsp RSP displacement.
8902 */
8903VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8904{
8905 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8906# define SET_SS_DEF() \
8907 do \
8908 { \
8909 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8910 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8911 } while (0)
8912
8913 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8914 {
8915/** @todo Check the effective address size crap! */
8916 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8917 {
8918 uint16_t u16EffAddr;
8919
8920 /* Handle the disp16 form with no registers first. */
8921 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8922 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8923 else
8924 {
8925 /* Get the displacement. */
8926 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8927 {
8928 case 0: u16EffAddr = 0; break;
8929 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8930 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8931 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8932 }
8933
8934 /* Add the base and index registers to the disp. */
8935 switch (bRm & X86_MODRM_RM_MASK)
8936 {
8937 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8938 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8939 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8940 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8941 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8942 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8943 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8944 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8945 }
8946 }
8947
8948 *pGCPtrEff = u16EffAddr;
8949 }
8950 else
8951 {
8952 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8953 uint32_t u32EffAddr;
8954
8955 /* Handle the disp32 form with no registers first. */
8956 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8957 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8958 else
8959 {
8960 /* Get the register (or SIB) value. */
8961 switch ((bRm & X86_MODRM_RM_MASK))
8962 {
8963 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8964 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8965 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8966 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8967 case 4: /* SIB */
8968 {
8969 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8970
8971 /* Get the index and scale it. */
8972 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8973 {
8974 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8975 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8976 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8977 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8978 case 4: u32EffAddr = 0; /*none */ break;
8979 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8980 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8981 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8982 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8983 }
8984 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8985
8986 /* add base */
8987 switch (bSib & X86_SIB_BASE_MASK)
8988 {
8989 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8990 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8991 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8992 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8993 case 4:
8994 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
8995 SET_SS_DEF();
8996 break;
8997 case 5:
8998 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8999 {
9000 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9001 SET_SS_DEF();
9002 }
9003 else
9004 {
9005 uint32_t u32Disp;
9006 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9007 u32EffAddr += u32Disp;
9008 }
9009 break;
9010 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9011 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9012 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9013 }
9014 break;
9015 }
9016 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9017 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9018 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9019 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9020 }
9021
9022 /* Get and add the displacement. */
9023 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9024 {
9025 case 0:
9026 break;
9027 case 1:
9028 {
9029 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9030 u32EffAddr += i8Disp;
9031 break;
9032 }
9033 case 2:
9034 {
9035 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9036 u32EffAddr += u32Disp;
9037 break;
9038 }
9039 default:
9040 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9041 }
9042
9043 }
9044 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9045 *pGCPtrEff = u32EffAddr;
9046 else
9047 {
9048 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9049 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9050 }
9051 }
9052 }
9053 else
9054 {
9055 uint64_t u64EffAddr;
9056
9057 /* Handle the rip+disp32 form with no registers first. */
9058 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9059 {
9060 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9061 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9062 }
9063 else
9064 {
9065 /* Get the register (or SIB) value. */
9066 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9067 {
9068 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9069 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9070 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9071 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9072 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9073 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9074 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9075 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9076 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9077 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9078 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9079 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9080 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9081 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9082 /* SIB */
9083 case 4:
9084 case 12:
9085 {
9086 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9087
9088 /* Get the index and scale it. */
9089 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9090 {
9091 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9092 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9093 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9094 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9095 case 4: u64EffAddr = 0; /*none */ break;
9096 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9097 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9098 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9099 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9100 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9101 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9102 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9103 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9104 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9105 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9106 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9107 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9108 }
9109 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9110
9111 /* add base */
9112 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9113 {
9114 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9115 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9116 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9117 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9118 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
9119 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9120 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9121 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9122 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9123 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9124 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9125 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9126 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9127 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9128 /* complicated encodings */
9129 case 5:
9130 case 13:
9131 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9132 {
9133 if (!pVCpu->iem.s.uRexB)
9134 {
9135 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9136 SET_SS_DEF();
9137 }
9138 else
9139 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9140 }
9141 else
9142 {
9143 uint32_t u32Disp;
9144 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9145 u64EffAddr += (int32_t)u32Disp;
9146 }
9147 break;
9148 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9149 }
9150 break;
9151 }
9152 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9153 }
9154
9155 /* Get and add the displacement. */
9156 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9157 {
9158 case 0:
9159 break;
9160 case 1:
9161 {
9162 int8_t i8Disp;
9163 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9164 u64EffAddr += i8Disp;
9165 break;
9166 }
9167 case 2:
9168 {
9169 uint32_t u32Disp;
9170 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9171 u64EffAddr += (int32_t)u32Disp;
9172 break;
9173 }
9174 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9175 }
9176
9177 }
9178
9179 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9180 *pGCPtrEff = u64EffAddr;
9181 else
9182 {
9183 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9184 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9185 }
9186 }
9187
9188 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
9189 return VINF_SUCCESS;
9190}
9191
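#if 0 /* Illustrative sketch only, never built; kept as a worked example. */
/**
 * Hedged illustration of the 32-bit ModR/M + SIB arithmetic performed above,
 * using plain integers instead of IEM state.  For the byte sequence
 * 8B 44 B3 10 (mov eax, [ebx+esi*4+0x10]) the ModR/M byte 0x44 selects an SIB
 * byte plus a disp8, and SIB 0xB3 encodes scale=4, index=ESI, base=EBX.  The
 * function and parameter names below are made up for this sketch.
 */
static uint32_t iemSketchCalcEa32(uint32_t uBase, uint32_t uIndex, uint8_t bSib, int32_t i32Disp)
{
    uint32_t uEffAddr = uIndex << ((bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK); /* scale the index */
    uEffAddr += uBase;                                                                   /* add the base */
    return uEffAddr + (uint32_t)i32Disp;                                                 /* and the displacement */
}
/* iemSketchCalcEa32(uEbx, uEsi, 0xB3, 0x10) yields ebx + esi*4 + 0x10. */
#endif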
9192
9193#ifdef IEM_WITH_SETJMP
9194/**
9195 * Calculates the effective address of a ModR/M memory operand.
9196 *
9197 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9198 *
9199 * May longjmp on internal error.
9200 *
9201 * @return The effective address.
9202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9203 * @param bRm The ModRM byte.
9204 * @param cbImm The size of any immediate following the
9205 * effective address opcode bytes. Important for
9206 * RIP relative addressing.
9207 */
9208RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
9209{
9210 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9211# define SET_SS_DEF() \
9212 do \
9213 { \
9214 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9215 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9216 } while (0)
9217
9218 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9219 {
9220/** @todo Check the effective address size crap! */
9221 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9222 {
9223 uint16_t u16EffAddr;
9224
9225 /* Handle the disp16 form with no registers first. */
9226 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9227 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9228 else
9229 {
9230 /* Get the displacement. */
9231 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9232 {
9233 case 0: u16EffAddr = 0; break;
9234 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9235 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9236 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9237 }
9238
9239 /* Add the base and index registers to the disp. */
9240 switch (bRm & X86_MODRM_RM_MASK)
9241 {
9242 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9243 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9244 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9245 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9246 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9247 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9248 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9249 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9250 }
9251 }
9252
9253 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9254 return u16EffAddr;
9255 }
9256
9257 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9258 uint32_t u32EffAddr;
9259
9260 /* Handle the disp32 form with no registers first. */
9261 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9262 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9263 else
9264 {
9265 /* Get the register (or SIB) value. */
9266 switch ((bRm & X86_MODRM_RM_MASK))
9267 {
9268 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9269 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9270 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9271 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9272 case 4: /* SIB */
9273 {
9274 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9275
9276 /* Get the index and scale it. */
9277 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9278 {
9279 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9280 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9281 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9282 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9283 case 4: u32EffAddr = 0; /*none */ break;
9284 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9285 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9286 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9287 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9288 }
9289 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9290
9291 /* add base */
9292 switch (bSib & X86_SIB_BASE_MASK)
9293 {
9294 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9295 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9296 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9297 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9298 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9299 case 5:
9300 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9301 {
9302 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9303 SET_SS_DEF();
9304 }
9305 else
9306 {
9307 uint32_t u32Disp;
9308 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9309 u32EffAddr += u32Disp;
9310 }
9311 break;
9312 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9313 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9314 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9315 }
9316 break;
9317 }
9318 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9319 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9320 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9321 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9322 }
9323
9324 /* Get and add the displacement. */
9325 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9326 {
9327 case 0:
9328 break;
9329 case 1:
9330 {
9331 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9332 u32EffAddr += i8Disp;
9333 break;
9334 }
9335 case 2:
9336 {
9337 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9338 u32EffAddr += u32Disp;
9339 break;
9340 }
9341 default:
9342 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9343 }
9344 }
9345
9346 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9347 {
9348 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9349 return u32EffAddr;
9350 }
9351 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9352 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9353 return u32EffAddr & UINT16_MAX;
9354 }
9355
9356 uint64_t u64EffAddr;
9357
9358 /* Handle the rip+disp32 form with no registers first. */
9359 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9360 {
9361 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9362 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9363 }
9364 else
9365 {
9366 /* Get the register (or SIB) value. */
9367 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9368 {
9369 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9370 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9371 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9372 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9373 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9374 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9375 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9376 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9377 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9378 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9379 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9380 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9381 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9382 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9383 /* SIB */
9384 case 4:
9385 case 12:
9386 {
9387 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9388
9389 /* Get the index and scale it. */
9390 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9391 {
9392 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9393 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9394 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9395 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9396 case 4: u64EffAddr = 0; /*none */ break;
9397 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9398 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9399 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9400 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9401 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9402 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9403 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9404 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9405 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9406 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9407 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9408 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9409 }
9410 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9411
9412 /* add base */
9413 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9414 {
9415 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9416 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9417 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9418 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9419 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9420 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9421 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9422 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9423 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9424 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9425 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9426 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9427 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9428 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9429 /* complicated encodings */
9430 case 5:
9431 case 13:
9432 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9433 {
9434 if (!pVCpu->iem.s.uRexB)
9435 {
9436 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9437 SET_SS_DEF();
9438 }
9439 else
9440 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9441 }
9442 else
9443 {
9444 uint32_t u32Disp;
9445 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9446 u64EffAddr += (int32_t)u32Disp;
9447 }
9448 break;
9449 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9450 }
9451 break;
9452 }
9453 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9454 }
9455
9456 /* Get and add the displacement. */
9457 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9458 {
9459 case 0:
9460 break;
9461 case 1:
9462 {
9463 int8_t i8Disp;
9464 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9465 u64EffAddr += i8Disp;
9466 break;
9467 }
9468 case 2:
9469 {
9470 uint32_t u32Disp;
9471 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9472 u64EffAddr += (int32_t)u32Disp;
9473 break;
9474 }
9475 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9476 }
9477
9478 }
9479
9480 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9481 {
9482 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9483 return u64EffAddr;
9484 }
9485 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9486 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9487 return u64EffAddr & UINT32_MAX;
9488}
9489#endif /* IEM_WITH_SETJMP */
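
#if 0 /* Illustrative sketch only, never built. */
/**
 * Hedged illustration of the rip+disp32 case handled by the helpers above: the
 * displacement is relative to the address of the *next* instruction, which is
 * why the code adds the bytes decoded so far plus @a cbImm (an immediate that
 * follows the displacement has not been fetched yet at that point).  All names
 * below are made up for this sketch.
 */
static uint64_t iemSketchCalcRipRelEa(uint64_t uRipOfInstr, uint8_t cbDecodedSoFar, uint8_t cbImm, int32_t i32Disp)
{
    uint64_t const uRipNext = uRipOfInstr + cbDecodedSoFar + cbImm; /* address of the following instruction */
    return uRipNext + (int64_t)i32Disp;                             /* sign-extended disp32 relative to it */
}
#endif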
9490
9491/** @} */
9492
9493
9494#ifdef LOG_ENABLED
9495/**
9496 * Logs the current instruction.
9497 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9498 * @param fSameCtx Set if we have the same context information as the VMM,
9499 * clear if we may have already executed an instruction in
9500 * our debug context. When clear, we assume IEMCPU holds
9501 * valid CPU mode info.
9502 *
9503 * @note The @a fSameCtx parameter is now misleading and obsolete.
9504 * @param pszFunction The IEM function doing the execution.
9505 */
9506static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9507{
9508# ifdef IN_RING3
9509 if (LogIs2Enabled())
9510 {
9511 char szInstr[256];
9512 uint32_t cbInstr = 0;
9513 if (fSameCtx)
9514 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9515 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9516 szInstr, sizeof(szInstr), &cbInstr);
9517 else
9518 {
9519 uint32_t fFlags = 0;
9520 switch (pVCpu->iem.s.enmCpuMode)
9521 {
9522 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9523 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9524 case IEMMODE_16BIT:
9525 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9526 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9527 else
9528 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9529 break;
9530 }
9531 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9532 szInstr, sizeof(szInstr), &cbInstr);
9533 }
9534
9535 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9536 Log2(("**** %s\n"
9537 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9538 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9539 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9540 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9541 " %s\n"
9542 , pszFunction,
9543 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9544 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9545 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9546 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9547 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9548 szInstr));
9549
9550 if (LogIs3Enabled())
9551 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9552 }
9553 else
9554# endif
9555 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9556 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9557 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9558}
9559#endif /* LOG_ENABLED */
9560
9561
9562#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9563/**
9564 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9565 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9566 *
9567 * @returns Modified rcStrict.
9568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9569 * @param rcStrict The instruction execution status.
9570 */
9571static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9572{
9573 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
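    /*
     * Priority order, going by the notes in the branches below: APIC-write
     * emulation, then MTF, then the VMX-preemption timer, then NMI-window and
     * finally interrupt-window VM-exits.
     */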
9574 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9575 {
9576 /* VMX preemption timer takes priority over NMI-window exits. */
9577 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9578 {
9579 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9580 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9581 }
9582 /*
9583 * Check remaining intercepts.
9584 *
9585 * NMI-window and Interrupt-window VM-exits.
9586 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9587 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9588 *
9589 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9590 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9591 */
9592 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9593 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9594 && !TRPMHasTrap(pVCpu))
9595 {
9596 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9597 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9598 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9599 {
9600 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9601 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9602 }
9603 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9604 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9605 {
9606 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9607 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9608 }
9609 }
9610 }
9611 /* TPR-below threshold/APIC write has the highest priority. */
9612 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9613 {
9614 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9615 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9616 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9617 }
9618 /* MTF takes priority over VMX-preemption timer. */
9619 else
9620 {
9621 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9622 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9623 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9624 }
9625 return rcStrict;
9626}
9627#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9628
9629
9630/**
9631 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9632 * IEMExecOneWithPrefetchedByPC.
9633 *
9634 * Similar code is found in IEMExecLots.
9635 *
9636 * @return Strict VBox status code.
9637 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9638 * @param fExecuteInhibit If set, execute the instruction following CLI,
9639 * POP SS and MOV SS,GR.
9640 * @param pszFunction The calling function name.
9641 */
9642DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9643{
9644 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9645 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9646 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9647 RT_NOREF_PV(pszFunction);
9648
9649#ifdef IEM_WITH_SETJMP
9650 VBOXSTRICTRC rcStrict;
9651 jmp_buf JmpBuf;
9652 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9653 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9654 if ((rcStrict = setjmp(JmpBuf)) == 0)
9655 {
9656 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9657 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9658 }
9659 else
9660 pVCpu->iem.s.cLongJumps++;
9661 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9662#else
9663 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9664 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9665#endif
9666 if (rcStrict == VINF_SUCCESS)
9667 pVCpu->iem.s.cInstructions++;
9668 if (pVCpu->iem.s.cActiveMappings > 0)
9669 {
9670 Assert(rcStrict != VINF_SUCCESS);
9671 iemMemRollback(pVCpu);
9672 }
9673 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9674 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9675 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9676
9677//#ifdef DEBUG
9678// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9679//#endif
9680
9681#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9682 /*
9683 * Perform any VMX nested-guest instruction boundary actions.
9684 *
9685 * If any of these causes a VM-exit, we must skip executing the next
9686 * instruction (it would run into stale page tables). A VM-exit makes sure
9687 * there is no interrupt-inhibition, so that should ensure we don't end up
9688 * trying to execute the next instruction. Clearing fExecuteInhibit would be
9689 * problematic because of the setjmp/longjmp clobbering above.
9690 */
9691 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9692 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9693 || rcStrict != VINF_SUCCESS)
9694 { /* likely */ }
9695 else
9696 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9697#endif
9698
9699 /* Execute the next instruction as well if a cli, pop ss or
9700 mov ss, Gr has just completed successfully. */
9701 if ( fExecuteInhibit
9702 && rcStrict == VINF_SUCCESS
9703 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9704 && EMIsInhibitInterruptsActive(pVCpu))
9705 {
9706 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9707 if (rcStrict == VINF_SUCCESS)
9708 {
9709#ifdef LOG_ENABLED
9710 iemLogCurInstr(pVCpu, false, pszFunction);
9711#endif
9712#ifdef IEM_WITH_SETJMP
9713 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9714 if ((rcStrict = setjmp(JmpBuf)) == 0)
9715 {
9716 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9717 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9718 }
9719 else
9720 pVCpu->iem.s.cLongJumps++;
9721 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9722#else
9723 IEM_OPCODE_GET_NEXT_U8(&b);
9724 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9725#endif
9726 if (rcStrict == VINF_SUCCESS)
9727 {
9728 pVCpu->iem.s.cInstructions++;
9729#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9730 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9731 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9732 { /* likely */ }
9733 else
9734 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9735#endif
9736 }
9737 if (pVCpu->iem.s.cActiveMappings > 0)
9738 {
9739 Assert(rcStrict != VINF_SUCCESS);
9740 iemMemRollback(pVCpu);
9741 }
9742 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9743 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9744 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9745 }
9746 else if (pVCpu->iem.s.cActiveMappings > 0)
9747 iemMemRollback(pVCpu);
9748 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9749 }
9750
9751 /*
9752 * Return value fiddling, statistics and sanity assertions.
9753 */
9754 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9755
9756 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9757 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9758 return rcStrict;
9759}
9760
9761
9762/**
9763 * Execute one instruction.
9764 *
9765 * @return Strict VBox status code.
9766 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9767 */
9768VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9769{
9770 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9771#ifdef LOG_ENABLED
9772 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9773#endif
9774
9775 /*
9776 * Do the decoding and emulation.
9777 */
9778 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9779 if (rcStrict == VINF_SUCCESS)
9780 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9781 else if (pVCpu->iem.s.cActiveMappings > 0)
9782 iemMemRollback(pVCpu);
9783
9784 if (rcStrict != VINF_SUCCESS)
9785 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9786 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9787 return rcStrict;
9788}
9789
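#if 0 /* Hedged usage sketch, never built. */
/**
 * Minimal illustration of driving the interpreter one instruction at a time.
 * The helper name and the stepping loop are made up for this sketch; only
 * IEMExecOne() and the VINF_SUCCESS convention come from the code above.
 */
static VBOXSTRICTRC iemSketchStepInstructions(PVMCPUCC pVCpu, uint32_t cSteps)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cSteps-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);   /* decode and execute exactly one guest instruction */
        if (rcStrict != VINF_SUCCESS)   /* stop on any informational or error status */
            break;
    }
    return rcStrict;
}
#endif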
9790
9791VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9792{
9793 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9794
9795 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9796 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9797 if (rcStrict == VINF_SUCCESS)
9798 {
9799 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9800 if (pcbWritten)
9801 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9802 }
9803 else if (pVCpu->iem.s.cActiveMappings > 0)
9804 iemMemRollback(pVCpu);
9805
9806 return rcStrict;
9807}
9808
9809
9810VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9811 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9812{
9813 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9814
9815 VBOXSTRICTRC rcStrict;
9816 if ( cbOpcodeBytes
9817 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9818 {
9819 iemInitDecoder(pVCpu, false, false);
9820#ifdef IEM_WITH_CODE_TLB
9821 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9822 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9823 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9824 pVCpu->iem.s.offCurInstrStart = 0;
9825 pVCpu->iem.s.offInstrNextByte = 0;
9826#else
9827 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9828 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9829#endif
9830 rcStrict = VINF_SUCCESS;
9831 }
9832 else
9833 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9834 if (rcStrict == VINF_SUCCESS)
9835 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9836 else if (pVCpu->iem.s.cActiveMappings > 0)
9837 iemMemRollback(pVCpu);
9838
9839 return rcStrict;
9840}
9841
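#if 0 /* Hedged usage sketch, never built. */
/**
 * Illustrates the prefetched-bytes variant above.  The buffer and its origin
 * are hypothetical; the bytes are only used when they were read at the current
 * guest RIP, otherwise the function falls back to a normal prefetch.
 */
static VBOXSTRICTRC iemSketchExecPrefetched(PVMCPUCC pVCpu, uint8_t const *pabBytes, size_t cbBytes)
{
    uint64_t const GCPtrPC = pVCpu->cpum.GstCtx.rip; /* the bytes must have been read from here */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)), GCPtrPC, pabBytes, cbBytes);
}
#endif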
9842
9843VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9844{
9845 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9846
9847 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9848 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9849 if (rcStrict == VINF_SUCCESS)
9850 {
9851 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9852 if (pcbWritten)
9853 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9854 }
9855 else if (pVCpu->iem.s.cActiveMappings > 0)
9856 iemMemRollback(pVCpu);
9857
9858 return rcStrict;
9859}
9860
9861
9862VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9863 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9864{
9865 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9866
9867 VBOXSTRICTRC rcStrict;
9868 if ( cbOpcodeBytes
9869 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9870 {
9871 iemInitDecoder(pVCpu, true, false);
9872#ifdef IEM_WITH_CODE_TLB
9873 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9874 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9875 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9876 pVCpu->iem.s.offCurInstrStart = 0;
9877 pVCpu->iem.s.offInstrNextByte = 0;
9878#else
9879 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9880 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9881#endif
9882 rcStrict = VINF_SUCCESS;
9883 }
9884 else
9885 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9886 if (rcStrict == VINF_SUCCESS)
9887 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9888 else if (pVCpu->iem.s.cActiveMappings > 0)
9889 iemMemRollback(pVCpu);
9890
9891 return rcStrict;
9892}
9893
9894
9895/**
9896 * For debugging DISGetParamSize, may come in handy.
9897 *
9898 * @returns Strict VBox status code.
9899 * @param pVCpu The cross context virtual CPU structure of the
9900 * calling EMT.
9901 * @param pCtxCore The context core structure.
9902 * @param OpcodeBytesPC The PC of the opcode bytes.
9903 * @param pvOpcodeBytes Prefetched opcode bytes.
9904 * @param cbOpcodeBytes Number of prefetched bytes.
9905 * @param pcbWritten Where to return the number of bytes written.
9906 * Optional.
9907 */
9908VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9909 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9910 uint32_t *pcbWritten)
9911{
9912 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9913
9914 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9915 VBOXSTRICTRC rcStrict;
9916 if ( cbOpcodeBytes
9917 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9918 {
9919 iemInitDecoder(pVCpu, true, false);
9920#ifdef IEM_WITH_CODE_TLB
9921 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9922 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9923 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9924 pVCpu->iem.s.offCurInstrStart = 0;
9925 pVCpu->iem.s.offInstrNextByte = 0;
9926#else
9927 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9928 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9929#endif
9930 rcStrict = VINF_SUCCESS;
9931 }
9932 else
9933 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9934 if (rcStrict == VINF_SUCCESS)
9935 {
9936 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9937 if (pcbWritten)
9938 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9939 }
9940 else if (pVCpu->iem.s.cActiveMappings > 0)
9941 iemMemRollback(pVCpu);
9942
9943 return rcStrict;
9944}
9945
9946
9947/**
9948 * For handling split cacheline lock operations when the host has split-lock
9949 * detection enabled.
9950 *
9951 * This will cause the interpreter to disregard the lock prefix and implicit
9952 * locking (xchg).
9953 *
9954 * @returns Strict VBox status code.
9955 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9956 */
9957VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9958{
9959 /*
9960 * Do the decoding and emulation.
9961 */
9962 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9963 if (rcStrict == VINF_SUCCESS)
9964 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9965 else if (pVCpu->iem.s.cActiveMappings > 0)
9966 iemMemRollback(pVCpu);
9967
9968 if (rcStrict != VINF_SUCCESS)
9969 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9970 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9971 return rcStrict;
9972}
9973
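#if 0 /* Hedged usage sketch, never built. */
/**
 * Shows where the split-lock variant above fits in: when the host has flagged
 * a split-lock access for the current guest instruction, it can be re-run with
 * the lock prefix (and implicit locking) disregarded.  The trigger condition
 * below is purely illustrative.
 */
static VBOXSTRICTRC iemSketchExecMaybeIgnoringLock(PVMCPUCC pVCpu, bool fHostFlaggedSplitLock)
{
    if (fHostFlaggedSplitLock)
        return IEMExecOneIgnoreLock(pVCpu); /* interpret, disregarding LOCK / implicit locking */
    return IEMExecOne(pVCpu);               /* normal single-instruction path */
}
#endif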
9974
9975VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9976{
9977 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9978 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9979
9980 /*
9981 * See if there is an interrupt pending in TRPM, inject it if we can.
9982 */
9983 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9984#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9985 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9986 if (fIntrEnabled)
9987 {
9988 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9989 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9990 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9991 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9992 else
9993 {
9994 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9995 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9996 }
9997 }
9998#else
9999 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10000#endif
10001
10002 /** @todo What if we are injecting an exception and not an interrupt? Is that
10003 * possible here? For now we assert it is indeed only an interrupt. */
10004 if ( fIntrEnabled
10005 && TRPMHasTrap(pVCpu)
10006 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
10007 {
10008 uint8_t u8TrapNo;
10009 TRPMEVENT enmType;
10010 uint32_t uErrCode;
10011 RTGCPTR uCr2;
10012 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
10013 AssertRC(rc2);
10014 Assert(enmType == TRPM_HARDWARE_INT);
10015 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10016 TRPMResetTrap(pVCpu);
10017#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10018 /* Injecting an event may cause a VM-exit. */
10019 if ( rcStrict != VINF_SUCCESS
10020 && rcStrict != VINF_IEM_RAISED_XCPT)
10021 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10022#else
10023 NOREF(rcStrict);
10024#endif
10025 }
10026
10027 /*
10028 * Initial decoder init w/ prefetch, then setup setjmp.
10029 */
10030 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10031 if (rcStrict == VINF_SUCCESS)
10032 {
10033#ifdef IEM_WITH_SETJMP
10034 jmp_buf JmpBuf;
10035 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10036 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10037 pVCpu->iem.s.cActiveMappings = 0;
10038 if ((rcStrict = setjmp(JmpBuf)) == 0)
10039#endif
10040 {
10041 /*
10042 * The run loop, bounded by the caller-specified cMaxInstructions budget.
10043 */
10044 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10045 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10046 for (;;)
10047 {
10048 /*
10049 * Log the state.
10050 */
10051#ifdef LOG_ENABLED
10052 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10053#endif
10054
10055 /*
10056 * Do the decoding and emulation.
10057 */
10058 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10059 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10060 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10061 {
10062 Assert(pVCpu->iem.s.cActiveMappings == 0);
10063 pVCpu->iem.s.cInstructions++;
10064
10065#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10066 /* Perform any VMX nested-guest instruction boundary actions. */
10067 uint64_t fCpu = pVCpu->fLocalForcedActions;
10068 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10069 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10070 { /* likely */ }
10071 else
10072 {
10073 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10074 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10075 fCpu = pVCpu->fLocalForcedActions;
10076 else
10077 {
10078 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10079 break;
10080 }
10081 }
10082#endif
10083 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10084 {
10085#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10086 uint64_t fCpu = pVCpu->fLocalForcedActions;
10087#endif
10088 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10089 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10090 | VMCPU_FF_TLB_FLUSH
10091 | VMCPU_FF_INHIBIT_INTERRUPTS
10092 | VMCPU_FF_BLOCK_NMIS
10093 | VMCPU_FF_UNHALT );
10094
10095 if (RT_LIKELY( ( !fCpu
10096 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10097 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10098 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10099 {
10100 if (cMaxInstructionsGccStupidity-- > 0)
10101 {
10102 /* Poll timers every now and then according to the caller's specs. */
10103 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10104 || !TMTimerPollBool(pVM, pVCpu))
10105 {
10106 Assert(pVCpu->iem.s.cActiveMappings == 0);
10107 iemReInitDecoder(pVCpu);
10108 continue;
10109 }
10110 }
10111 }
10112 }
10113 Assert(pVCpu->iem.s.cActiveMappings == 0);
10114 }
10115 else if (pVCpu->iem.s.cActiveMappings > 0)
10116 iemMemRollback(pVCpu);
10117 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10118 break;
10119 }
10120 }
10121#ifdef IEM_WITH_SETJMP
10122 else
10123 {
10124 if (pVCpu->iem.s.cActiveMappings > 0)
10125 iemMemRollback(pVCpu);
10126# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10127 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10128# endif
10129 pVCpu->iem.s.cLongJumps++;
10130 }
10131 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10132#endif
10133
10134 /*
10135 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10136 */
10137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10139 }
10140 else
10141 {
10142 if (pVCpu->iem.s.cActiveMappings > 0)
10143 iemMemRollback(pVCpu);
10144
10145#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10146 /*
10147 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
10148 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
10149 */
10150 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10151#endif
10152 }
10153
10154 /*
10155 * Maybe re-enter raw-mode and log.
10156 */
10157 if (rcStrict != VINF_SUCCESS)
10158 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10159 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10160 if (pcInstructions)
10161 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10162 return rcStrict;
10163}
10164
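#if 0 /* Hedged usage sketch, never built. */
/**
 * Shows the IEMExecLots() calling convention.  The instruction budget and the
 * poll rate below are made up; the poll rate must be a power of two minus one
 * since it is used as a mask (see the assertion at the top of IEMExecLots()),
 * so timers end up being polled roughly every cPollRate + 1 instructions.
 */
static VBOXSTRICTRC iemSketchRunBatch(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("iemSketchRunBatch: %u instructions executed, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif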
10165
10166/**
10167 * Interface used by EMExecuteExec, does exit statistics and limits.
10168 *
10169 * @returns Strict VBox status code.
10170 * @param pVCpu The cross context virtual CPU structure.
10171 * @param fWillExit To be defined.
10172 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10173 * @param cMaxInstructions Maximum number of instructions to execute.
10174 * @param cMaxInstructionsWithoutExits
10175 * The max number of instructions without exits.
10176 * @param pStats Where to return statistics.
10177 */
10178VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10179 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10180{
10181 NOREF(fWillExit); /** @todo define flexible exit crits */
10182
10183 /*
10184 * Initialize return stats.
10185 */
10186 pStats->cInstructions = 0;
10187 pStats->cExits = 0;
10188 pStats->cMaxExitDistance = 0;
10189 pStats->cReserved = 0;
10190
10191 /*
10192 * Initial decoder init w/ prefetch, then setup setjmp.
10193 */
10194 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10195 if (rcStrict == VINF_SUCCESS)
10196 {
10197#ifdef IEM_WITH_SETJMP
10198 jmp_buf JmpBuf;
10199 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10200 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10201 pVCpu->iem.s.cActiveMappings = 0;
10202 if ((rcStrict = setjmp(JmpBuf)) == 0)
10203#endif
10204 {
10205#ifdef IN_RING0
10206 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10207#endif
10208 uint32_t cInstructionSinceLastExit = 0;
10209
10210 /*
10211 * The run loop, bounded by the caller-specified instruction limits.
10212 */
10213 PVM pVM = pVCpu->CTX_SUFF(pVM);
10214 for (;;)
10215 {
10216 /*
10217 * Log the state.
10218 */
10219#ifdef LOG_ENABLED
10220 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10221#endif
10222
10223 /*
10224 * Do the decoding and emulation.
10225 */
10226 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10227
10228 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10229 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10230
10231 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10232 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10233 {
10234 pStats->cExits += 1;
10235 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10236 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10237 cInstructionSinceLastExit = 0;
10238 }
10239
10240 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10241 {
10242 Assert(pVCpu->iem.s.cActiveMappings == 0);
10243 pVCpu->iem.s.cInstructions++;
10244 pStats->cInstructions++;
10245 cInstructionSinceLastExit++;
10246
10247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10248 /* Perform any VMX nested-guest instruction boundary actions. */
10249 uint64_t fCpu = pVCpu->fLocalForcedActions;
10250 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10251 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10252 { /* likely */ }
10253 else
10254 {
10255 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10256 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10257 fCpu = pVCpu->fLocalForcedActions;
10258 else
10259 {
10260 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10261 break;
10262 }
10263 }
10264#endif
10265 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10266 {
10267#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10268 uint64_t fCpu = pVCpu->fLocalForcedActions;
10269#endif
10270 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10271 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10272 | VMCPU_FF_TLB_FLUSH
10273 | VMCPU_FF_INHIBIT_INTERRUPTS
10274 | VMCPU_FF_BLOCK_NMIS
10275 | VMCPU_FF_UNHALT );
10276 if (RT_LIKELY( ( ( !fCpu
10277 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10278 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10279 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10280 || pStats->cInstructions < cMinInstructions))
10281 {
10282 if (pStats->cInstructions < cMaxInstructions)
10283 {
10284 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10285 {
10286#ifdef IN_RING0
10287 if ( !fCheckPreemptionPending
10288 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10289#endif
10290 {
10291 Assert(pVCpu->iem.s.cActiveMappings == 0);
10292 iemReInitDecoder(pVCpu);
10293 continue;
10294 }
10295#ifdef IN_RING0
10296 rcStrict = VINF_EM_RAW_INTERRUPT;
10297 break;
10298#endif
10299 }
10300 }
10301 }
10302 Assert(!(fCpu & VMCPU_FF_IEM));
10303 }
10304 Assert(pVCpu->iem.s.cActiveMappings == 0);
10305 }
10306 else if (pVCpu->iem.s.cActiveMappings > 0)
10307 iemMemRollback(pVCpu);
10308 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10309 break;
10310 }
10311 }
10312#ifdef IEM_WITH_SETJMP
10313 else
10314 {
10315 if (pVCpu->iem.s.cActiveMappings > 0)
10316 iemMemRollback(pVCpu);
10317 pVCpu->iem.s.cLongJumps++;
10318 }
10319 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10320#endif
10321
10322 /*
10323 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10324 */
10325 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10326 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10327 }
10328 else
10329 {
10330 if (pVCpu->iem.s.cActiveMappings > 0)
10331 iemMemRollback(pVCpu);
10332
10333#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10334 /*
10335 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
10336 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
10337 */
10338 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10339#endif
10340 }
10341
10342 /*
10343 * Maybe re-enter raw-mode and log.
10344 */
10345 if (rcStrict != VINF_SUCCESS)
10346 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10347 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10348 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10349 return rcStrict;
10350}
10351
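#if 0 /* Hedged usage sketch, never built. */
/**
 * Shows how the exit-statistics variant above might be driven.  The limits are
 * made up, fWillExit is passed as zero because the function currently ignores
 * it, and the IEMEXECFOREXITSTATS type name is assumed from the pointer
 * typedef used in the prototype.
 */
static VBOXSTRICTRC iemSketchExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 32 /*cMinInstructions*/, 2048 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("iemSketchExecForExits: %u instructions, %u exits, max exit distance %u, rcStrict=%Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif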
10352
10353/**
10354 * Injects a trap, fault, abort, software interrupt or external interrupt.
10355 *
10356 * The parameter list matches TRPMQueryTrapAll pretty closely.
10357 *
10358 * @returns Strict VBox status code.
10359 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10360 * @param u8TrapNo The trap number.
10361 * @param enmType What type is it (trap/fault/abort), software
10362 * interrupt or hardware interrupt.
10363 * @param uErrCode The error code if applicable.
10364 * @param uCr2 The CR2 value if applicable.
10365 * @param cbInstr The instruction length (only relevant for
10366 * software interrupts).
10367 */
10368VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10369 uint8_t cbInstr)
10370{
10371 iemInitDecoder(pVCpu, false, false);
10372#ifdef DBGFTRACE_ENABLED
10373 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10374 u8TrapNo, enmType, uErrCode, uCr2);
10375#endif
10376
10377 uint32_t fFlags;
10378 switch (enmType)
10379 {
10380 case TRPM_HARDWARE_INT:
10381 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10382 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10383 uErrCode = uCr2 = 0;
10384 break;
10385
10386 case TRPM_SOFTWARE_INT:
10387 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10388 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10389 uErrCode = uCr2 = 0;
10390 break;
10391
10392 case TRPM_TRAP:
10393 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10394 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10395 if (u8TrapNo == X86_XCPT_PF)
10396 fFlags |= IEM_XCPT_FLAGS_CR2;
10397 switch (u8TrapNo)
10398 {
10399 case X86_XCPT_DF:
10400 case X86_XCPT_TS:
10401 case X86_XCPT_NP:
10402 case X86_XCPT_SS:
10403 case X86_XCPT_PF:
10404 case X86_XCPT_AC:
10405 case X86_XCPT_GP:
10406 fFlags |= IEM_XCPT_FLAGS_ERR;
10407 break;
10408 }
10409 break;
10410
10411 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10412 }
10413
10414 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10415
10416 if (pVCpu->iem.s.cActiveMappings > 0)
10417 iemMemRollback(pVCpu);
10418
10419 return rcStrict;
10420}
10421
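#if 0 /* Hedged usage sketch, never built. */
/**
 * Illustrates the TRPM_TRAP path of IEMInjectTrap() above for a page fault:
 * X86_XCPT_PF carries both an error code and the faulting address in CR2.
 * The error code value below is illustrative (bit 1 = write access), and
 * cbInstr is irrelevant for anything but software interrupts.
 */
static VBOXSTRICTRC iemSketchInjectPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault)
{
    uint16_t const uErrCode = 0x0002; /* write to a not-present page, for illustration */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif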
10422
10423/**
10424 * Injects the active TRPM event.
10425 *
10426 * @returns Strict VBox status code.
10427 * @param pVCpu The cross context virtual CPU structure.
10428 */
10429VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10430{
10431#ifndef IEM_IMPLEMENTS_TASKSWITCH
10432 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10433#else
10434 uint8_t u8TrapNo;
10435 TRPMEVENT enmType;
10436 uint32_t uErrCode;
10437 RTGCUINTPTR uCr2;
10438 uint8_t cbInstr;
10439 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10440 if (RT_FAILURE(rc))
10441 return rc;
10442
10443 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10444 * ICEBP \#DB injection as a special case. */
10445 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10446#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10447 if (rcStrict == VINF_SVM_VMEXIT)
10448 rcStrict = VINF_SUCCESS;
10449#endif
10450#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10451 if (rcStrict == VINF_VMX_VMEXIT)
10452 rcStrict = VINF_SUCCESS;
10453#endif
10454 /** @todo Are there any other codes that imply the event was successfully
10455 * delivered to the guest? See @bugref{6607}. */
10456 if ( rcStrict == VINF_SUCCESS
10457 || rcStrict == VINF_IEM_RAISED_XCPT)
10458 TRPMResetTrap(pVCpu);
10459
10460 return rcStrict;
10461#endif
10462}
10463
10464
10465VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10466{
10467 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10468 return VERR_NOT_IMPLEMENTED;
10469}
10470
10471
10472VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10473{
10474 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10475 return VERR_NOT_IMPLEMENTED;
10476}
10477
10478
10479#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10480/**
10481 * Executes an IRET instruction with default operand size.
10482 *
10483 * This is for PATM.
10484 *
10485 * @returns VBox status code.
10486 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10487 * @param pCtxCore The register frame.
10488 */
10489VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10490{
10491 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10492
10493 iemCtxCoreToCtx(pCtx, pCtxCore);
10494 iemInitDecoder(pVCpu);
10495 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10496 if (rcStrict == VINF_SUCCESS)
10497 iemCtxToCtxCore(pCtxCore, pCtx);
10498 else
10499 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10500 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10501 return rcStrict;
10502}
10503#endif
10504
10505
10506/**
10507 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10508 *
10509 * This API ASSUMES that the caller has already verified that the guest code is
10510 * allowed to access the I/O port. (The I/O port is in the DX register in the
10511 * guest state.)
10512 *
10513 * @returns Strict VBox status code.
10514 * @param pVCpu The cross context virtual CPU structure.
10515 * @param cbValue The size of the I/O port access (1, 2, or 4).
10516 * @param enmAddrMode The addressing mode.
10517 * @param fRepPrefix Indicates whether a repeat prefix is used
10518 * (doesn't matter which for this instruction).
10519 * @param cbInstr The instruction length in bytes.
10520 * @param iEffSeg The effective segment register (X86_SREG_XXX index).
10521 * @param fIoChecked Whether the access to the I/O port has been
10522 * checked or not. It's typically checked in the
10523 * HM scenario.
10524 */
10525VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10526 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10527{
10528 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10529 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10530
10531 /*
10532 * State init.
10533 */
10534 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10535
10536 /*
10537 * Switch orgy for getting to the right handler.
10538 */
10539 VBOXSTRICTRC rcStrict;
10540 if (fRepPrefix)
10541 {
10542 switch (enmAddrMode)
10543 {
10544 case IEMMODE_16BIT:
10545 switch (cbValue)
10546 {
10547 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10548 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10549 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10550 default:
10551 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10552 }
10553 break;
10554
10555 case IEMMODE_32BIT:
10556 switch (cbValue)
10557 {
10558 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10559 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10560 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10561 default:
10562 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10563 }
10564 break;
10565
10566 case IEMMODE_64BIT:
10567 switch (cbValue)
10568 {
10569 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10570 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10571 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10572 default:
10573 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10574 }
10575 break;
10576
10577 default:
10578 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10579 }
10580 }
10581 else
10582 {
10583 switch (enmAddrMode)
10584 {
10585 case IEMMODE_16BIT:
10586 switch (cbValue)
10587 {
10588 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10589 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10590 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10591 default:
10592 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10593 }
10594 break;
10595
10596 case IEMMODE_32BIT:
10597 switch (cbValue)
10598 {
10599 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10600 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10601 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10602 default:
10603 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10604 }
10605 break;
10606
10607 case IEMMODE_64BIT:
10608 switch (cbValue)
10609 {
10610 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10611 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10612 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10613 default:
10614 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10615 }
10616 break;
10617
10618 default:
10619 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10620 }
10621 }
10622
10623 if (pVCpu->iem.s.cActiveMappings)
10624 iemMemRollback(pVCpu);
10625
10626 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10627}
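
/*
 * Illustrative sketch (added, not part of the original file): forwarding a
 * REP OUTSB that exited to the hypervisor.  The instruction length and
 * address mode are placeholders; real callers take them from the VM-exit
 * information.  The same pattern applies to IEMExecStringIoRead() for INS.
 */
#if 0 /* example only */
static VBOXSTRICTRC hmExampleHandleOutsExit(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu,
                                1             /* cbValue: byte access */,
                                IEMMODE_32BIT /* enmAddrMode, placeholder */,
                                true          /* fRepPrefix */,
                                2             /* cbInstr: REP prefix + OUTSB, placeholder */,
                                X86_SREG_DS   /* iEffSeg: default segment, no override */,
                                true          /* fIoChecked in the HM scenario */);
}
#endif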
10628
10629
10630/**
10631 * Interface for HM and EM for executing string I/O IN (read) instructions.
10632 *
10633 * This API ASSUMES that the caller has already verified that the guest code is
10634 * allowed to access the I/O port. (The I/O port is in the DX register in the
10635 * guest state.)
10636 *
10637 * @returns Strict VBox status code.
10638 * @param pVCpu The cross context virtual CPU structure.
10639 * @param cbValue The size of the I/O port access (1, 2, or 4).
10640 * @param enmAddrMode The addressing mode.
10641 * @param fRepPrefix Indicates whether a repeat prefix is used
10642 * (doesn't matter which for this instruction).
10643 * @param cbInstr The instruction length in bytes.
10644 * @param fIoChecked Whether the access to the I/O port has been
10645 * checked or not. It's typically checked in the
10646 * HM scenario.
10647 */
10648VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10649 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10650{
10651 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10652
10653 /*
10654 * State init.
10655 */
10656 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10657
10658 /*
10659 * Switch orgy for getting to the right handler.
10660 */
10661 VBOXSTRICTRC rcStrict;
10662 if (fRepPrefix)
10663 {
10664 switch (enmAddrMode)
10665 {
10666 case IEMMODE_16BIT:
10667 switch (cbValue)
10668 {
10669 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10670 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10671 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10672 default:
10673 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10674 }
10675 break;
10676
10677 case IEMMODE_32BIT:
10678 switch (cbValue)
10679 {
10680 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10681 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10682 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10683 default:
10684 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10685 }
10686 break;
10687
10688 case IEMMODE_64BIT:
10689 switch (cbValue)
10690 {
10691 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10692 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10693 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10694 default:
10695 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10696 }
10697 break;
10698
10699 default:
10700 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10701 }
10702 }
10703 else
10704 {
10705 switch (enmAddrMode)
10706 {
10707 case IEMMODE_16BIT:
10708 switch (cbValue)
10709 {
10710 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10711 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10712 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10713 default:
10714 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10715 }
10716 break;
10717
10718 case IEMMODE_32BIT:
10719 switch (cbValue)
10720 {
10721 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10722 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10723 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10724 default:
10725 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10726 }
10727 break;
10728
10729 case IEMMODE_64BIT:
10730 switch (cbValue)
10731 {
10732 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10733 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10734 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10735 default:
10736 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10737 }
10738 break;
10739
10740 default:
10741 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10742 }
10743 }
10744
10745 if ( pVCpu->iem.s.cActiveMappings == 0
10746 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10747 { /* likely */ }
10748 else
10749 {
10750 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10751 iemMemRollback(pVCpu);
10752 }
10753 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10754}
10755
10756
10757/**
10758 * Interface for raw-mode to execute an OUT (write) instruction.
10759 *
10760 * @returns Strict VBox status code.
10761 * @param pVCpu The cross context virtual CPU structure.
10762 * @param cbInstr The instruction length in bytes.
10763 * @param u16Port The port to write to.
10764 * @param fImm Whether the port is specified using an immediate operand or
10765 * using the implicit DX register.
10766 * @param cbReg The register size.
10767 *
10768 * @remarks In ring-0 not all of the state needs to be synced in.
10769 */
10770VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10771{
10772 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10773 Assert(cbReg <= 4 && cbReg != 3);
10774
10775 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10776 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10777 Assert(!pVCpu->iem.s.cActiveMappings);
10778 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10779}
10780
10781
10782/**
10783 * Interface for raw-mode to execute an IN (read) instruction.
10784 *
10785 * @returns Strict VBox status code.
10786 * @param pVCpu The cross context virtual CPU structure.
10787 * @param cbInstr The instruction length in bytes.
10788 * @param u16Port The port to read.
10789 * @param fImm Whether the port is specified using an immediate operand or
10790 * using the implicit DX register.
10791 * @param cbReg The register size.
10792 */
10793VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10794{
10795 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10796 Assert(cbReg <= 4 && cbReg != 3);
10797
10798 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10799 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10800 Assert(!pVCpu->iem.s.cActiveMappings);
10801 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10802}
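
/*
 * Illustrative sketch (added, not part of the original file): emulating a
 * single-byte "out dx, al" after a port I/O intercept.  The port would come
 * from the guest DX / exit information; fImm is false because no immediate
 * operand is used, and OUT DX, AL is a one byte opcode.
 */
#if 0 /* example only */
static VBOXSTRICTRC hmExampleEmulateOutDxAl(PVMCPUCC pVCpu, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
#endif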
10803
10804
10805/**
10806 * Interface for HM and EM to write to a CRx register.
10807 *
10808 * @returns Strict VBox status code.
10809 * @param pVCpu The cross context virtual CPU structure.
10810 * @param cbInstr The instruction length in bytes.
10811 * @param iCrReg The control register number (destination).
10812 * @param iGReg The general purpose register number (source).
10813 *
10814 * @remarks In ring-0 not all of the state needs to be synced in.
10815 */
10816VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10817{
10818 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10819 Assert(iCrReg < 16);
10820 Assert(iGReg < 16);
10821
10822 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10823 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10824 Assert(!pVCpu->iem.s.cActiveMappings);
10825 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10826}
10827
10828
10829/**
10830 * Interface for HM and EM to read from a CRx register.
10831 *
10832 * @returns Strict VBox status code.
10833 * @param pVCpu The cross context virtual CPU structure.
10834 * @param cbInstr The instruction length in bytes.
10835 * @param iGReg The general purpose register number (destination).
10836 * @param iCrReg The control register number (source).
10837 *
10838 * @remarks In ring-0 not all of the state needs to be synced in.
10839 */
10840VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10841{
10842 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10843 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10844 | CPUMCTX_EXTRN_APIC_TPR);
10845 Assert(iCrReg < 16);
10846 Assert(iGReg < 16);
10847
10848 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10849 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10850 Assert(!pVCpu->iem.s.cActiveMappings);
10851 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10852}
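
/*
 * Illustrative sketch (added, not part of the original file): handling a
 * "mov rax, cr4" intercept by forwarding it to IEM.  The register indices
 * would normally come from the decoded exit qualification; 3 bytes covers
 * the two-byte opcode plus ModR/M.  The caller is assumed to have imported
 * the CPUMCTX state required by the IEM_CTX_ASSERT above.
 */
#if 0 /* example only */
static VBOXSTRICTRC hmExampleEmulateMovFromCr4(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, 0 /*iGReg=RAX*/, 4 /*iCrReg=CR4*/);
}
#endif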
10853
10854
10855/**
10856 * Interface for HM and EM to clear the CR0[TS] bit.
10857 *
10858 * @returns Strict VBox status code.
10859 * @param pVCpu The cross context virtual CPU structure.
10860 * @param cbInstr The instruction length in bytes.
10861 *
10862 * @remarks In ring-0 not all of the state needs to be synced in.
10863 */
10864VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10865{
10866 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10867
10868 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10869 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10870 Assert(!pVCpu->iem.s.cActiveMappings);
10871 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10872}
10873
10874
10875/**
10876 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10877 *
10878 * @returns Strict VBox status code.
10879 * @param pVCpu The cross context virtual CPU structure.
10880 * @param cbInstr The instruction length in bytes.
10881 * @param uValue The value to load into CR0.
10882 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10883 * memory operand. Otherwise pass NIL_RTGCPTR.
10884 *
10885 * @remarks In ring-0 not all of the state needs to be synced in.
10886 */
10887VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10888{
10889 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10890
10891 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10892 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10893 Assert(!pVCpu->iem.s.cActiveMappings);
10894 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10895}
10896
10897
10898/**
10899 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10900 *
10901 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10902 *
10903 * @returns Strict VBox status code.
10904 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10905 * @param cbInstr The instruction length in bytes.
10906 * @remarks In ring-0 not all of the state needs to be synced in.
10907 * @thread EMT(pVCpu)
10908 */
10909VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10910{
10911 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10912
10913 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10914 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10915 Assert(!pVCpu->iem.s.cActiveMappings);
10916 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10917}
10918
10919
10920/**
10921 * Interface for HM and EM to emulate the WBINVD instruction.
10922 *
10923 * @returns Strict VBox status code.
10924 * @param pVCpu The cross context virtual CPU structure.
10925 * @param cbInstr The instruction length in bytes.
10926 *
10927 * @remarks In ring-0 not all of the state needs to be synced in.
10928 */
10929VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10930{
10931 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10932
10933 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10934 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10935 Assert(!pVCpu->iem.s.cActiveMappings);
10936 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10937}
10938
10939
10940/**
10941 * Interface for HM and EM to emulate the INVD instruction.
10942 *
10943 * @returns Strict VBox status code.
10944 * @param pVCpu The cross context virtual CPU structure.
10945 * @param cbInstr The instruction length in bytes.
10946 *
10947 * @remarks In ring-0 not all of the state needs to be synced in.
10948 */
10949VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10950{
10951 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10952
10953 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10954 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10955 Assert(!pVCpu->iem.s.cActiveMappings);
10956 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10957}
10958
10959
10960/**
10961 * Interface for HM and EM to emulate the INVLPG instruction.
10962 *
10963 * @returns Strict VBox status code.
10964 * @retval VINF_PGM_SYNC_CR3
10965 *
10966 * @param pVCpu The cross context virtual CPU structure.
10967 * @param cbInstr The instruction length in bytes.
10968 * @param GCPtrPage The effective address of the page to invalidate.
10969 *
10970 * @remarks In ring-0 not all of the state needs to be synced in.
10971 */
10972VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10973{
10974 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10975
10976 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10977 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10978 Assert(!pVCpu->iem.s.cActiveMappings);
10979 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10980}
10981
10982
10983/**
10984 * Interface for HM and EM to emulate the INVPCID instruction.
10985 *
10986 * @returns Strict VBox status code.
10987 * @retval VINF_PGM_SYNC_CR3
10988 *
10989 * @param pVCpu The cross context virtual CPU structure.
10990 * @param cbInstr The instruction length in bytes.
10991 * @param iEffSeg The effective segment register.
10992 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10993 * @param uType The invalidation type.
10994 *
10995 * @remarks In ring-0 not all of the state needs to be synced in.
10996 */
10997VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10998 uint64_t uType)
10999{
11000 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11001
11002 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11003 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11004 Assert(!pVCpu->iem.s.cActiveMappings);
11005 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11006}
11007
11008
11009/**
11010 * Interface for HM and EM to emulate the CPUID instruction.
11011 *
11012 * @returns Strict VBox status code.
11013 *
11014 * @param pVCpu The cross context virtual CPU structure.
11015 * @param cbInstr The instruction length in bytes.
11016 *
11017 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
11018 */
11019VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11020{
11021 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11022 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11023
11024 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11025 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11026 Assert(!pVCpu->iem.s.cActiveMappings);
11027 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11028}
11029
11030
11031/**
11032 * Interface for HM and EM to emulate the RDPMC instruction.
11033 *
11034 * @returns Strict VBox status code.
11035 *
11036 * @param pVCpu The cross context virtual CPU structure.
11037 * @param cbInstr The instruction length in bytes.
11038 *
11039 * @remarks Not all of the state needs to be synced in.
11040 */
11041VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11042{
11043 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11044 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11045
11046 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11047 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11048 Assert(!pVCpu->iem.s.cActiveMappings);
11049 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11050}
11051
11052
11053/**
11054 * Interface for HM and EM to emulate the RDTSC instruction.
11055 *
11056 * @returns Strict VBox status code.
11057 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11058 *
11059 * @param pVCpu The cross context virtual CPU structure.
11060 * @param cbInstr The instruction length in bytes.
11061 *
11062 * @remarks Not all of the state needs to be synced in.
11063 */
11064VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11065{
11066 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11067 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11068
11069 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11070 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11071 Assert(!pVCpu->iem.s.cActiveMappings);
11072 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11073}
11074
11075
11076/**
11077 * Interface for HM and EM to emulate the RDTSCP instruction.
11078 *
11079 * @returns Strict VBox status code.
11080 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11081 *
11082 * @param pVCpu The cross context virtual CPU structure.
11083 * @param cbInstr The instruction length in bytes.
11084 *
11085 * @remarks Not all of the state needs to be synced in. Recommended
11086 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11087 */
11088VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11089{
11090 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11091 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11092
11093 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11094 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11095 Assert(!pVCpu->iem.s.cActiveMappings);
11096 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11097}
11098
11099
11100/**
11101 * Interface for HM and EM to emulate the RDMSR instruction.
11102 *
11103 * @returns Strict VBox status code.
11104 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11105 *
11106 * @param pVCpu The cross context virtual CPU structure.
11107 * @param cbInstr The instruction length in bytes.
11108 *
11109 * @remarks Not all of the state needs to be synced in. Requires RCX and
11110 * (currently) all MSRs.
11111 */
11112VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11113{
11114 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11115 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11116
11117 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11118 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11119 Assert(!pVCpu->iem.s.cActiveMappings);
11120 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11121}
11122
11123
11124/**
11125 * Interface for HM and EM to emulate the WRMSR instruction.
11126 *
11127 * @returns Strict VBox status code.
11128 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11129 *
11130 * @param pVCpu The cross context virtual CPU structure.
11131 * @param cbInstr The instruction length in bytes.
11132 *
11133 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11134 * and (currently) all MSRs.
11135 */
11136VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11137{
11138 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11139 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11140 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11141
11142 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11143 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11144 Assert(!pVCpu->iem.s.cActiveMappings);
11145 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11146}
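
/*
 * Illustrative sketch (added, not part of the original file): forwarding a
 * WRMSR intercept.  RCX, RAX, RDX and the MSR state must already be available
 * (see the IEM_CTX_ASSERT above); WRMSR is always the two byte opcode 0F 30.
 * The same shape works for IEMExecDecodedRdmsr().
 */
#if 0 /* example only */
static VBOXSTRICTRC hmExampleEmulateWrmsr(PVMCPUCC pVCpu)
{
    return IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/);
}
#endif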
11147
11148
11149/**
11150 * Interface for HM and EM to emulate the MONITOR instruction.
11151 *
11152 * @returns Strict VBox status code.
11153 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11154 *
11155 * @param pVCpu The cross context virtual CPU structure.
11156 * @param cbInstr The instruction length in bytes.
11157 *
11158 * @remarks Not all of the state needs to be synced in.
11159 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11160 * are used.
11161 */
11162VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11163{
11164 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11165 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11166
11167 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11168 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11169 Assert(!pVCpu->iem.s.cActiveMappings);
11170 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11171}
11172
11173
11174/**
11175 * Interface for HM and EM to emulate the MWAIT instruction.
11176 *
11177 * @returns Strict VBox status code.
11178 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11179 *
11180 * @param pVCpu The cross context virtual CPU structure.
11181 * @param cbInstr The instruction length in bytes.
11182 *
11183 * @remarks Not all of the state needs to be synced in.
11184 */
11185VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11186{
11187 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11188 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11189
11190 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11191 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11192 Assert(!pVCpu->iem.s.cActiveMappings);
11193 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11194}
11195
11196
11197/**
11198 * Interface for HM and EM to emulate the HLT instruction.
11199 *
11200 * @returns Strict VBox status code.
11201 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11202 *
11203 * @param pVCpu The cross context virtual CPU structure.
11204 * @param cbInstr The instruction length in bytes.
11205 *
11206 * @remarks Not all of the state needs to be synced in.
11207 */
11208VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11209{
11210 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11211
11212 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11213 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11214 Assert(!pVCpu->iem.s.cActiveMappings);
11215 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11216}
11217
11218
11219/**
11220 * Checks if IEM is in the process of delivering an event (interrupt or
11221 * exception).
11222 *
11223 * @returns true if we're in the process of raising an interrupt or exception,
11224 * false otherwise.
11225 * @param pVCpu The cross context virtual CPU structure.
11226 * @param puVector Where to store the vector associated with the
11227 * currently delivered event, optional.
11228 * @param pfFlags Where to store the event delivery flags (see
11229 * IEM_XCPT_FLAGS_XXX), optional.
11230 * @param puErr Where to store the error code associated with the
11231 * event, optional.
11232 * @param puCr2 Where to store the CR2 associated with the event,
11233 * optional.
11234 * @remarks The caller should check the flags to determine if the error code and
11235 * CR2 are valid for the event.
11236 */
11237VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11238{
11239 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11240 if (fRaisingXcpt)
11241 {
11242 if (puVector)
11243 *puVector = pVCpu->iem.s.uCurXcpt;
11244 if (pfFlags)
11245 *pfFlags = pVCpu->iem.s.fCurXcpt;
11246 if (puErr)
11247 *puErr = pVCpu->iem.s.uCurXcptErr;
11248 if (puCr2)
11249 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11250 }
11251 return fRaisingXcpt;
11252}
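
/*
 * Illustrative sketch (added, not part of the original file): querying the
 * event IEM is currently delivering.  IEM_XCPT_FLAGS_ERR and
 * IEM_XCPT_FLAGS_CR2 are assumed to be the relevant bits of the
 * IEM_XCPT_FLAGS_XXX family; they must be checked before trusting the error
 * code and CR2 values, as noted in the remarks above.
 */
#if 0 /* example only */
static void hmExampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Example: delivering vector %#x fFlags=%#x uErr=%#x (%s) uCr2=%#RX64 (%s)\n",
             uVector, fFlags,
             uErr, (fFlags & IEM_XCPT_FLAGS_ERR) ? "valid" : "ignore",
             uCr2, (fFlags & IEM_XCPT_FLAGS_CR2) ? "valid" : "ignore"));
}
#endif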
11253
11254#ifdef IN_RING3
11255
11256/**
11257 * Handles the unlikely and probably fatal merge cases.
11258 *
11259 * @returns Merged status code.
11260 * @param rcStrict Current EM status code.
11261 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11262 * with @a rcStrict.
11263 * @param iMemMap The memory mapping index. For error reporting only.
11264 * @param pVCpu The cross context virtual CPU structure of the calling
11265 * thread, for error reporting only.
11266 */
11267DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11268 unsigned iMemMap, PVMCPUCC pVCpu)
11269{
11270 if (RT_FAILURE_NP(rcStrict))
11271 return rcStrict;
11272
11273 if (RT_FAILURE_NP(rcStrictCommit))
11274 return rcStrictCommit;
11275
11276 if (rcStrict == rcStrictCommit)
11277 return rcStrictCommit;
11278
11279 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11280 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11281 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11282 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11283 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11284 return VERR_IOM_FF_STATUS_IPE;
11285}
11286
11287
11288/**
11289 * Helper for IOMR3ProcessForceFlag.
11290 *
11291 * @returns Merged status code.
11292 * @param rcStrict Current EM status code.
11293 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11294 * with @a rcStrict.
11295 * @param iMemMap The memory mapping index. For error reporting only.
11296 * @param pVCpu The cross context virtual CPU structure of the calling
11297 * thread, for error reporting only.
11298 */
11299DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11300{
11301 /* Simple. */
11302 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11303 return rcStrictCommit;
11304
11305 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11306 return rcStrict;
11307
11308 /* EM scheduling status codes. */
11309 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11310 && rcStrict <= VINF_EM_LAST))
11311 {
11312 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11313 && rcStrictCommit <= VINF_EM_LAST))
11314 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11315 }
11316
11317 /* Unlikely */
11318 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11319}
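
/*
 * Worked example (added, not part of the original file) of the merge rules
 * above: a VINF_SUCCESS or VINF_EM_RAW_TO_R3 on the EM side simply yields the
 * commit status, while two EM scheduling codes are merged by picking the
 * numerically lower (higher priority) one.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemR3ExampleMerge(PVMCPUCC pVCpu)
{
    /* Takes the fast path: VINF_EM_RAW_TO_R3 is treated like success, so the
       commit status VINF_EM_RESCHEDULE is returned unchanged. */
    return iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_EM_RESCHEDULE, 0 /*iMemMap*/, pVCpu);
}
#endif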
11320
11321
11322/**
11323 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11324 *
11325 * @returns Merge between @a rcStrict and what the commit operation returned.
11326 * @param pVM The cross context VM structure.
11327 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11328 * @param rcStrict The status code returned by ring-0 or raw-mode.
11329 */
11330VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11331{
11332 /*
11333 * Reset the pending commit.
11334 */
11335 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11336 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11337 ("%#x %#x %#x\n",
11338 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11339 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11340
11341 /*
11342 * Commit the pending bounce buffers (usually just one).
11343 */
11344 unsigned cBufs = 0;
11345 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11346 while (iMemMap-- > 0)
11347 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11348 {
11349 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11350 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11351 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11352
11353 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11354 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11355 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11356
11357 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11358 {
11359 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11360 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11361 pbBuf,
11362 cbFirst,
11363 PGMACCESSORIGIN_IEM);
11364 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11365 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11366 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11367 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11368 }
11369
11370 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11371 {
11372 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11373 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11374 pbBuf + cbFirst,
11375 cbSecond,
11376 PGMACCESSORIGIN_IEM);
11377 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11378 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11379 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11380 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11381 }
11382 cBufs++;
11383 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11384 }
11385
11386 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11387 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11388 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11389 pVCpu->iem.s.cActiveMappings = 0;
11390 return rcStrict;
11391}
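
/*
 * Illustrative sketch (added, not part of the original file): how ring-3
 * force-flag processing might hand a pending bounce-buffer commit to IEM.
 * The wrapper name is hypothetical; real callers sit in the EM run loops.
 */
#if 0 /* example only */
static VBOXSTRICTRC emR3ExampleHandleIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif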
11392
11393#endif /* IN_RING3 */
11394