VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp @ 96733

Last change on this file since 96733 was 96637, checked in by vboxsync, 21 months ago:

VMM/IEM: Reverted r153516, as the destination is 64-byte aligned if accessed directly (see the check above), and r153518 corrected the misaligned bounce buffers. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 340.3 KB
1/* $Id: IEMAllCImpl.cpp 96637 2022-09-07 16:26:49Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/apic.h>
37#include <VBox/vmm/pdm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/iom.h>
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/hm.h>
42#include <VBox/vmm/nem.h>
43#include <VBox/vmm/gim.h>
44#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
45# include <VBox/vmm/em.h>
46# include <VBox/vmm/hm_svm.h>
47#endif
48#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
49# include <VBox/vmm/hmvmxinline.h>
50#endif
51#include <VBox/vmm/tm.h>
52#include <VBox/vmm/dbgf.h>
53#include <VBox/vmm/dbgftrace.h>
54#include "IEMInternal.h"
55#include <VBox/vmm/vmcc.h>
56#include <VBox/log.h>
57#include <VBox/err.h>
58#include <VBox/param.h>
59#include <VBox/dis.h>
60#include <VBox/disopcode.h>
61#include <iprt/asm-math.h>
62#include <iprt/assert.h>
63#include <iprt/string.h>
64#include <iprt/x86.h>
65
66#include "IEMInline.h"
67
68
69/** @name Misc Helpers
70 * @{
71 */
72
73
74/**
75 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
76 *
77 * @returns Strict VBox status code.
78 *
79 * @param pVCpu The cross context virtual CPU structure of the calling thread.
80 * @param u16Port The port number.
81 * @param cbOperand The operand size.
82 */
83static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
84{
85 /* The TSS bits we're interested in are the same on 386 and AMD64. */
86 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
87 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
88 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
89 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
90
91 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
92
93 /*
94 * Check the TSS type; 16-bit TSSes don't have an I/O permission bitmap.
95 */
96 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
97 if (RT_UNLIKELY( pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
98 && pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
99 {
100 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
101 u16Port, cbOperand, pVCpu->cpum.GstCtx.tr.Attr.n.u4Type, pVCpu->cpum.GstCtx.tr.Attr.u));
102 return iemRaiseGeneralProtectionFault0(pVCpu);
103 }
104
105 /*
106 * Read the bitmap offset (may #PF).
107 */
108 uint16_t offBitmap;
109 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
110 pVCpu->cpum.GstCtx.tr.u64Base + RT_UOFFSETOF(X86TSS64, offIoBitmap));
111 if (rcStrict != VINF_SUCCESS)
112 {
113 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
114 return rcStrict;
115 }
116
117 /*
118 * We need the bit range from u16Port to (u16Port + cbOperand - 1); however, Intel
119 * describes the CPU as actually reading two bytes regardless of whether the
120 * bit range crosses a byte boundary. Thus the + 1 in the test below.
121 */
122 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
123 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
124 * for instance, sizeof(X86TSS32). */
125 if (offFirstBit + 1 > pVCpu->cpum.GstCtx.tr.u32Limit) /* the limit is inclusive */
126 {
127 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
128 offFirstBit, pVCpu->cpum.GstCtx.tr.u32Limit));
129 return iemRaiseGeneralProtectionFault0(pVCpu);
130 }
131
132 /*
133 * Read the necessary bits.
134 */
135 /** @todo Test the assertion in the Intel manual that the CPU reads two
136 * bytes. The question is how this works wrt \#PF and \#GP on the
137 * 2nd byte when it's not required. */
138 uint16_t bmBytes = UINT16_MAX;
139 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + offFirstBit);
140 if (rcStrict != VINF_SUCCESS)
141 {
142 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
143 return rcStrict;
144 }
145
146 /*
147 * Perform the check.
148 */
149 uint16_t fPortMask = (1 << cbOperand) - 1;
150 bmBytes >>= (u16Port & 7);
151 if (bmBytes & fPortMask)
152 {
153 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
154 u16Port, cbOperand, bmBytes, fPortMask));
155 return iemRaiseGeneralProtectionFault0(pVCpu);
156 }
157
158 return VINF_SUCCESS;
159}
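

/*
 * Illustrative sketch (not compiled): the bit arithmetic of the I/O bitmap
 * test above, reduced to its essentials.  The helper name and parameters are
 * hypothetical; it assumes the caller has already fetched the two bitmap
 * bytes covering the port, exactly as iemHlpCheckPortIOPermissionBitmap does.
 */
#if 0
static bool iemHlpSketchIoBitmapDeniesAccess(uint16_t bmBytes, uint16_t u16Port, uint8_t cbOperand)
{
    /* Each port owns one bit; an access of cbOperand bytes tests cbOperand
       consecutive bits starting at bit (u16Port & 7) of the byte at offset
       offIoBitmap + u16Port / 8 in the TSS.  Any set bit denies the access. */
    uint16_t const fPortMask = (uint16_t)((1 << cbOperand) - 1);
    return ((bmBytes >> (u16Port & 7)) & fPortMask) != 0;
}
#endif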
160
161
162/**
163 * Checks if we are allowed to access the given I/O port, raising the
164 * appropriate exceptions if we aren't (or if the I/O bitmap is not
165 * accessible).
166 *
167 * @returns Strict VBox status code.
168 *
169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
170 * @param u16Port The port number.
171 * @param cbOperand The operand size.
172 */
173DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
174{
175 X86EFLAGS Efl;
176 Efl.u = IEMMISC_GET_EFL(pVCpu);
177 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
178 && ( pVCpu->iem.s.uCpl > Efl.Bits.u2IOPL
179 || Efl.Bits.u1VM) )
180 return iemHlpCheckPortIOPermissionBitmap(pVCpu, u16Port, cbOperand);
181 return VINF_SUCCESS;
182}
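

/*
 * Sketch (not compiled) of the gate condition above: in real mode (CR0.PE=0)
 * all ports are accessible, and in protected mode the TSS bitmap is only
 * consulted when CPL > IOPL or when executing V8086 code.  Parameter names
 * are illustrative only.
 */
#if 0
static bool iemHlpSketchNeedsIoBitmapCheck(bool fProtectedMode, uint8_t uCpl, uint8_t uIopl, bool fV86)
{
    return fProtectedMode && (uCpl > uIopl || fV86);
}
#endif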
183
184
185#if 0
186/**
187 * Calculates the parity bit.
188 *
189 * @returns true if the bit is set, false if not.
190 * @param u8Result The least significant byte of the result.
191 */
192static bool iemHlpCalcParityFlag(uint8_t u8Result)
193{
194 /*
195 * Parity is set if the number of bits in the least significant byte of
196 * the result is even.
197 */
198 uint8_t cBits;
199 cBits = u8Result & 1; /* 0 */
200 u8Result >>= 1;
201 cBits += u8Result & 1;
202 u8Result >>= 1;
203 cBits += u8Result & 1;
204 u8Result >>= 1;
205 cBits += u8Result & 1;
206 u8Result >>= 1;
207 cBits += u8Result & 1; /* 4 */
208 u8Result >>= 1;
209 cBits += u8Result & 1;
210 u8Result >>= 1;
211 cBits += u8Result & 1;
212 u8Result >>= 1;
213 cBits += u8Result & 1;
214 return !(cBits & 1);
215}
216#endif /* not used */
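

/*
 * Alternative sketch (also not compiled): the same parity calculation using
 * XOR folding instead of serial bit counting.  After the three folds, bit 0
 * holds the XOR of all eight input bits, so PF is its inverse.
 */
#if 0
static bool iemHlpCalcParityFlagXorFold(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;
    return !(u8Result & 1); /* even number of set bits => PF=1 */
}
#endif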
217
218
219/**
220 * Updates the specified flags according to an 8-bit result.
221 *
222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
223 * @param u8Result The result to set the flags according to.
224 * @param fToUpdate The flags to update.
225 * @param fUndefined The flags that are specified as undefined.
226 */
227static void iemHlpUpdateArithEFlagsU8(PVMCPUCC pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
228{
229 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
230 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
231 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
232 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
233}
234
235
236/**
237 * Updates the specified flags according to a 16-bit result.
238 *
239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
240 * @param u16Result The result to set the flags according to.
241 * @param fToUpdate The flags to update.
242 * @param fUndefined The flags that are specified as undefined.
243 */
244static void iemHlpUpdateArithEFlagsU16(PVMCPUCC pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
245{
246 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
247 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags);
248 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
249 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
250}
251
252
253/**
254 * Helper used by iret.
255 *
256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
257 * @param uCpl The new CPL.
258 * @param pSReg Pointer to the segment register.
259 */
260static void iemHlpAdjustSelectorForNewCpl(PVMCPUCC pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
261{
262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
263 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
264
265 if ( uCpl > pSReg->Attr.n.u2Dpl
266 && pSReg->Attr.n.u1DescType /* code or data, not system */
267 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
268 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
269 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
270}
271
272
273/**
274 * Indicates that we have modified the FPU state.
275 *
276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
277 */
278DECLINLINE(void) iemHlpUsedFpu(PVMCPUCC pVCpu)
279{
280 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
281}
282
283/** @} */
284
285/** @name C Implementations
286 * @{
287 */
288
289/**
290 * Implements a 16-bit popa.
291 */
292IEM_CIMPL_DEF_0(iemCImpl_popa_16)
293{
294 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
295 RTGCPTR GCPtrLast = GCPtrStart + 15;
296 VBOXSTRICTRC rcStrict;
297
298 /*
299 * The docs are a bit hard to comprehend here, but it looks like we wrap
300 * around in real mode as long as none of the individual pops crosses the
301 * end of the stack segment. In protected mode we check the whole access
302 * in one go. For efficiency, only do the word-by-word thing if we're in
303 * danger of wrapping around.
304 */
305 /** @todo do popa boundary / wrap-around checks. */
306 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
307 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
308 {
309 /* word-by-word */
310 RTUINT64U TmpRsp;
311 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
312 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.di, &TmpRsp);
313 if (rcStrict == VINF_SUCCESS)
314 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.si, &TmpRsp);
315 if (rcStrict == VINF_SUCCESS)
316 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bp, &TmpRsp);
317 if (rcStrict == VINF_SUCCESS)
318 {
319 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
320 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bx, &TmpRsp);
321 }
322 if (rcStrict == VINF_SUCCESS)
323 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.dx, &TmpRsp);
324 if (rcStrict == VINF_SUCCESS)
325 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.cx, &TmpRsp);
326 if (rcStrict == VINF_SUCCESS)
327 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.ax, &TmpRsp);
328 if (rcStrict == VINF_SUCCESS)
329 {
330 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
331 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
332 }
333 }
334 else
335 {
336 uint16_t const *pa16Mem = NULL;
337 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa16Mem) - 1);
338 if (rcStrict == VINF_SUCCESS)
339 {
340 pVCpu->cpum.GstCtx.di = pa16Mem[7 - X86_GREG_xDI];
341 pVCpu->cpum.GstCtx.si = pa16Mem[7 - X86_GREG_xSI];
342 pVCpu->cpum.GstCtx.bp = pa16Mem[7 - X86_GREG_xBP];
343 /* skip sp */
344 pVCpu->cpum.GstCtx.bx = pa16Mem[7 - X86_GREG_xBX];
345 pVCpu->cpum.GstCtx.dx = pa16Mem[7 - X86_GREG_xDX];
346 pVCpu->cpum.GstCtx.cx = pa16Mem[7 - X86_GREG_xCX];
347 pVCpu->cpum.GstCtx.ax = pa16Mem[7 - X86_GREG_xAX];
348 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
349 if (rcStrict == VINF_SUCCESS)
350 {
351 iemRegAddToRsp(pVCpu, 16);
352 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
353 }
354 }
355 }
356 return rcStrict;
357}
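

/*
 * Note on the "7 - X86_GREG_xXX" indexing used by the block mappings above and
 * below: PUSHA stores the registers in the order AX, CX, DX, BX, SP, BP, SI,
 * DI, with AX at the highest address.  Since X86_GREG_xAX..X86_GREG_xDI are
 * 0..7, "7 - reg" converts a register index into its slot in the mapped frame,
 * e.g. 7 - X86_GREG_xDI == 0 (lowest address) and 7 - X86_GREG_xAX == 7.
 */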
358
359
360/**
361 * Implements a 32-bit popa.
362 */
363IEM_CIMPL_DEF_0(iemCImpl_popa_32)
364{
365 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
366 RTGCPTR GCPtrLast = GCPtrStart + 31;
367 VBOXSTRICTRC rcStrict;
368
369 /*
370 * The docs are a bit hard to comprehend here, but it looks like we wrap
371 * around in real mode as long as none of the individual pops crosses the
372 * end of the stack segment. In protected mode we check the whole access
373 * in one go. For efficiency, only do the word-by-word thing if we're in
374 * danger of wrapping around.
375 */
376 /** @todo do popa boundary / wrap-around checks. */
377 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
378 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
379 {
380 /* word-by-word */
381 RTUINT64U TmpRsp;
382 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
383 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edi, &TmpRsp);
384 if (rcStrict == VINF_SUCCESS)
385 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.esi, &TmpRsp);
386 if (rcStrict == VINF_SUCCESS)
387 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebp, &TmpRsp);
388 if (rcStrict == VINF_SUCCESS)
389 {
390 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
391 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebx, &TmpRsp);
392 }
393 if (rcStrict == VINF_SUCCESS)
394 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edx, &TmpRsp);
395 if (rcStrict == VINF_SUCCESS)
396 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ecx, &TmpRsp);
397 if (rcStrict == VINF_SUCCESS)
398 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.eax, &TmpRsp);
399 if (rcStrict == VINF_SUCCESS)
400 {
401#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
402 pVCpu->cpum.GstCtx.rdi &= UINT32_MAX;
403 pVCpu->cpum.GstCtx.rsi &= UINT32_MAX;
404 pVCpu->cpum.GstCtx.rbp &= UINT32_MAX;
405 pVCpu->cpum.GstCtx.rbx &= UINT32_MAX;
406 pVCpu->cpum.GstCtx.rdx &= UINT32_MAX;
407 pVCpu->cpum.GstCtx.rcx &= UINT32_MAX;
408 pVCpu->cpum.GstCtx.rax &= UINT32_MAX;
409#endif
410 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
411 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
412 }
413 }
414 else
415 {
416 uint32_t const *pa32Mem;
417 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa32Mem) - 1);
418 if (rcStrict == VINF_SUCCESS)
419 {
420 pVCpu->cpum.GstCtx.rdi = pa32Mem[7 - X86_GREG_xDI];
421 pVCpu->cpum.GstCtx.rsi = pa32Mem[7 - X86_GREG_xSI];
422 pVCpu->cpum.GstCtx.rbp = pa32Mem[7 - X86_GREG_xBP];
423 /* skip esp */
424 pVCpu->cpum.GstCtx.rbx = pa32Mem[7 - X86_GREG_xBX];
425 pVCpu->cpum.GstCtx.rdx = pa32Mem[7 - X86_GREG_xDX];
426 pVCpu->cpum.GstCtx.rcx = pa32Mem[7 - X86_GREG_xCX];
427 pVCpu->cpum.GstCtx.rax = pa32Mem[7 - X86_GREG_xAX];
428 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
429 if (rcStrict == VINF_SUCCESS)
430 {
431 iemRegAddToRsp(pVCpu, 32);
432 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
433 }
434 }
435 }
436 return rcStrict;
437}
438
439
440/**
441 * Implements a 16-bit pusha.
442 */
443IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
444{
445 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
446 RTGCPTR GCPtrBottom = GCPtrTop - 15;
447 VBOXSTRICTRC rcStrict;
448
449 /*
450 * The docs are a bit hard to comprehend here, but it looks like we wrap
451 * around in real mode as long as none of the individual pushes crosses the
452 * end of the stack segment. In protected mode we check the whole access
453 * in one go. For efficiency, only do the word-by-word thing if we're in
454 * danger of wrapping around.
455 */
456 /** @todo do pusha boundary / wrap-around checks. */
457 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
458 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
459 {
460 /* word-by-word */
461 RTUINT64U TmpRsp;
462 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
463 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.ax, &TmpRsp);
464 if (rcStrict == VINF_SUCCESS)
465 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.cx, &TmpRsp);
466 if (rcStrict == VINF_SUCCESS)
467 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.dx, &TmpRsp);
468 if (rcStrict == VINF_SUCCESS)
469 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bx, &TmpRsp);
470 if (rcStrict == VINF_SUCCESS)
471 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.sp, &TmpRsp);
472 if (rcStrict == VINF_SUCCESS)
473 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bp, &TmpRsp);
474 if (rcStrict == VINF_SUCCESS)
475 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.si, &TmpRsp);
476 if (rcStrict == VINF_SUCCESS)
477 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.di, &TmpRsp);
478 if (rcStrict == VINF_SUCCESS)
479 {
480 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
481 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
482 }
483 }
484 else
485 {
486 GCPtrBottom--;
487 uint16_t *pa16Mem = NULL;
488 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa16Mem) - 1);
489 if (rcStrict == VINF_SUCCESS)
490 {
491 pa16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
492 pa16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;
493 pa16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;
494 pa16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;
495 pa16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx;
496 pa16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;
497 pa16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;
498 pa16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;
499 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
500 if (rcStrict == VINF_SUCCESS)
501 {
502 iemRegSubFromRsp(pVCpu, 16);
503 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
504 }
505 }
506 }
507 return rcStrict;
508}
509
510
511/**
512 * Implements a 32-bit pusha.
513 */
514IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
515{
516 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
517 RTGCPTR GCPtrBottom = GCPtrTop - 31;
518 VBOXSTRICTRC rcStrict;
519
520 /*
521 * The docs are a bit hard to comprehend here, but it looks like we wrap
522 * around in real mode as long as none of the individual pushes crosses the
523 * end of the stack segment. In protected mode we check the whole access
524 * in one go. For efficiency, only do the word-by-word thing if we're in
525 * danger of wrapping around.
526 */
527 /** @todo do pusha boundary / wrap-around checks. */
528 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
529 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
530 {
531 /* word-by-word */
532 RTUINT64U TmpRsp;
533 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
534 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.eax, &TmpRsp);
535 if (rcStrict == VINF_SUCCESS)
536 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ecx, &TmpRsp);
537 if (rcStrict == VINF_SUCCESS)
538 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edx, &TmpRsp);
539 if (rcStrict == VINF_SUCCESS)
540 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebx, &TmpRsp);
541 if (rcStrict == VINF_SUCCESS)
542 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esp, &TmpRsp);
543 if (rcStrict == VINF_SUCCESS)
544 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebp, &TmpRsp);
545 if (rcStrict == VINF_SUCCESS)
546 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esi, &TmpRsp);
547 if (rcStrict == VINF_SUCCESS)
548 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edi, &TmpRsp);
549 if (rcStrict == VINF_SUCCESS)
550 {
551 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
552 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
553 }
554 }
555 else
556 {
557 GCPtrBottom--;
558 uint32_t *pa32Mem;
559 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa32Mem) - 1);
560 if (rcStrict == VINF_SUCCESS)
561 {
562 pa32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
563 pa32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;
564 pa32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;
565 pa32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;
566 pa32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;
567 pa32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;
568 pa32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;
569 pa32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;
570 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
571 if (rcStrict == VINF_SUCCESS)
572 {
573 iemRegSubFromRsp(pVCpu, 32);
574 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
575 }
576 }
577 }
578 return rcStrict;
579}
580
581
582/**
583 * Implements pushf.
584 *
585 *
586 * @param enmEffOpSize The effective operand size.
587 */
588IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
589{
590 VBOXSTRICTRC rcStrict;
591
592 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
593 {
594 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
595 IEM_SVM_UPDATE_NRIP(pVCpu);
596 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
597 }
598
599 /*
600 * If we're in V8086 mode some care is required (which is why we're
601 * doing this in a C implementation).
602 */
603 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
604 if ( (fEfl & X86_EFL_VM)
605 && X86_EFL_GET_IOPL(fEfl) != 3 )
606 {
607 Assert(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE);
608 if ( enmEffOpSize != IEMMODE_16BIT
609 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
610 return iemRaiseGeneralProtectionFault0(pVCpu);
611 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
612 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
613 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
614 }
615 else
616 {
617
618 /*
619 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
620 */
621 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
622
623 switch (enmEffOpSize)
624 {
625 case IEMMODE_16BIT:
626 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
627 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
628 fEfl |= UINT16_C(0xf000);
629 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
630 break;
631 case IEMMODE_32BIT:
632 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
633 break;
634 case IEMMODE_64BIT:
635 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
636 break;
637 IEM_NOT_REACHED_DEFAULT_CASE_RET();
638 }
639 }
640 if (rcStrict != VINF_SUCCESS)
641 return rcStrict;
642
643 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
644 return VINF_SUCCESS;
645}
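

/*
 * Note on the (19 - 9) shift in the VME path above: IF is EFLAGS bit 9 and
 * VIF is bit 19, so shifting VIF down by ten bit positions puts the virtual
 * interrupt flag where IF normally sits in the 16-bit image that V8086 code
 * sees when CR4.VME=1 (the real IF having been masked out first).
 */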
646
647
648/**
649 * Implements popf.
650 *
651 * @param enmEffOpSize The effective operand size.
652 */
653IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
654{
655 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu);
656 VBOXSTRICTRC rcStrict;
657 uint32_t fEflNew;
658
659 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
660 {
661 Log2(("popf: Guest intercept -> #VMEXIT\n"));
662 IEM_SVM_UPDATE_NRIP(pVCpu);
663 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
664 }
665
666 /*
667 * V8086 is special as usual.
668 */
669 if (fEflOld & X86_EFL_VM)
670 {
671 /*
672 * Almost anything goes if IOPL is 3.
673 */
674 if (X86_EFL_GET_IOPL(fEflOld) == 3)
675 {
676 switch (enmEffOpSize)
677 {
678 case IEMMODE_16BIT:
679 {
680 uint16_t u16Value;
681 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
682 if (rcStrict != VINF_SUCCESS)
683 return rcStrict;
684 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
685 break;
686 }
687 case IEMMODE_32BIT:
688 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
689 if (rcStrict != VINF_SUCCESS)
690 return rcStrict;
691 break;
692 IEM_NOT_REACHED_DEFAULT_CASE_RET();
693 }
694
695 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
696 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
697 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
698 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
699 }
700 /*
701 * Interrupt flag virtualization with CR4.VME=1.
702 */
703 else if ( enmEffOpSize == IEMMODE_16BIT
704 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
705 {
706 uint16_t u16Value;
707 RTUINT64U TmpRsp;
708 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
709 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
710 if (rcStrict != VINF_SUCCESS)
711 return rcStrict;
712
713 /** @todo Is the popf VME \#GP(0) delivered after updating RSP+RIP
714 * or before? */
715 if ( ( (u16Value & X86_EFL_IF)
716 && (fEflOld & X86_EFL_VIP))
717 || (u16Value & X86_EFL_TF) )
718 return iemRaiseGeneralProtectionFault0(pVCpu);
719
720 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
721 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
722 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
723 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
724
725 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
726 }
727 else
728 return iemRaiseGeneralProtectionFault0(pVCpu);
729
730 }
731 /*
732 * Not in V8086 mode.
733 */
734 else
735 {
736 /* Pop the flags. */
737 switch (enmEffOpSize)
738 {
739 case IEMMODE_16BIT:
740 {
741 uint16_t u16Value;
742 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
743 if (rcStrict != VINF_SUCCESS)
744 return rcStrict;
745 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
746
747 /*
748 * Ancient CPU adjustments:
749 * - 8086, 80186, V20/30:
750 * Fixed bits 15:12 are not kept correctly internally, mostly for
751 * practical reasons (masking below). We add them when pushing flags.
752 * - 80286:
753 * The NT and IOPL flags cannot be popped from real mode and are
754 * therefore always zero (since a 286 can never exit from PM and
755 * their initial value is zero). This changed on a 386 and can
756 * therefore be used to distinguish a 286 from a 386 in real mode.
757 */
758 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
759 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
760 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
761 break;
762 }
763 case IEMMODE_32BIT:
764 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
765 if (rcStrict != VINF_SUCCESS)
766 return rcStrict;
767 break;
768 case IEMMODE_64BIT:
769 {
770 uint64_t u64Value;
771 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
772 if (rcStrict != VINF_SUCCESS)
773 return rcStrict;
774 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
775 break;
776 }
777 IEM_NOT_REACHED_DEFAULT_CASE_RET();
778 }
779
780 /* Merge them with the current flags. */
781 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
782 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
783 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
784 || pVCpu->iem.s.uCpl == 0)
785 {
786 fEflNew &= fPopfBits;
787 fEflNew |= ~fPopfBits & fEflOld;
788 }
789 else if (pVCpu->iem.s.uCpl <= X86_EFL_GET_IOPL(fEflOld))
790 {
791 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
792 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
793 }
794 else
795 {
796 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
797 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
798 }
799 }
800
801 /*
802 * Commit the flags.
803 */
804 Assert(fEflNew & RT_BIT_32(1));
805 IEMMISC_SET_EFL(pVCpu, fEflNew);
806 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
807
808 return VINF_SUCCESS;
809}
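

/*
 * Sketch (not compiled): the non-V8086 merge rules above, condensed into one
 * helper.  CPL 0 may change every bit POPF can touch (including IOPL),
 * CPL <= IOPL may change IF but not IOPL, and CPL > IOPL may change neither.
 * The helper name and parameters are hypothetical; fPopfBits corresponds to
 * X86_EFL_POPF_BITS or X86_EFL_POPF_BITS_386.
 */
#if 0
static uint32_t iemHlpSketchPopfMerge(uint32_t fEflOld, uint32_t fEflNew, uint32_t fPopfBits, uint8_t uCpl, uint8_t uIopl)
{
    uint32_t fWritable = fPopfBits;
    if (uCpl != 0)
    {
        fWritable &= ~(uint32_t)X86_EFL_IOPL;   /* only ring 0 may change IOPL */
        if (uCpl > uIopl)
            fWritable &= ~(uint32_t)X86_EFL_IF; /* insufficient IOPL: IF sticks too */
    }
    return (fEflNew & fWritable) | (fEflOld & ~fWritable);
}
#endif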
810
811
812/**
813 * Implements an indirect call.
814 *
815 * @param uNewPC The new program counter (RIP) value (loaded from the
816 * operand).
817 */
818IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
819{
820 uint16_t uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
821 if (uNewPC > pVCpu->cpum.GstCtx.cs.u32Limit)
822 return iemRaiseGeneralProtectionFault0(pVCpu);
823
824 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
825 if (rcStrict != VINF_SUCCESS)
826 return rcStrict;
827
828 pVCpu->cpum.GstCtx.rip = uNewPC;
829 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
830
831#ifndef IEM_WITH_CODE_TLB
832 /* Flush the prefetch buffer. */
833 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
834#endif
835 return VINF_SUCCESS;
836}
837
838
839/**
840 * Implements a 16-bit relative call.
841 *
842 * @param offDisp The displacement offset.
843 */
844IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
845{
846 uint16_t uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
847 uint16_t uNewPC = uOldPC + offDisp;
848 if (uNewPC > pVCpu->cpum.GstCtx.cs.u32Limit)
849 return iemRaiseGeneralProtectionFault0(pVCpu);
850
851 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
852 if (rcStrict != VINF_SUCCESS)
853 return rcStrict;
854
855 pVCpu->cpum.GstCtx.rip = uNewPC;
856 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
857
858#ifndef IEM_WITH_CODE_TLB
859 /* Flush the prefetch buffer. */
860 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
861#endif
862 return VINF_SUCCESS;
863}
864
865
866/**
867 * Implements a 32-bit indirect call.
868 *
869 * @param uNewPC The new program counter (RIP) value (loaded from the
870 * operand).
871 */
872IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
873{
874 uint32_t uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
875 if (uNewPC > pVCpu->cpum.GstCtx.cs.u32Limit)
876 return iemRaiseGeneralProtectionFault0(pVCpu);
877
878 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
879 if (rcStrict != VINF_SUCCESS)
880 return rcStrict;
881
882 pVCpu->cpum.GstCtx.rip = uNewPC;
883 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
884
885#ifndef IEM_WITH_CODE_TLB
886 /* Flush the prefetch buffer. */
887 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
888#endif
889 return VINF_SUCCESS;
890}
891
892
893/**
894 * Implements a 32-bit relative call.
895 *
896 * @param offDisp The displacement offset.
897 */
898IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
899{
900 uint32_t uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
901 uint32_t uNewPC = uOldPC + offDisp;
902 if (uNewPC > pVCpu->cpum.GstCtx.cs.u32Limit)
903 return iemRaiseGeneralProtectionFault0(pVCpu);
904
905 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
906 if (rcStrict != VINF_SUCCESS)
907 return rcStrict;
908
909 pVCpu->cpum.GstCtx.rip = uNewPC;
910 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
911
912#ifndef IEM_WITH_CODE_TLB
913 /* Flush the prefetch buffer. */
914 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
915#endif
916 return VINF_SUCCESS;
917}
918
919
920/**
921 * Implements a 64-bit indirect call.
922 *
923 * @param uNewPC The new program counter (RIP) value (loaded from the
924 * operand).
925 */
926IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
927{
928 uint64_t uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
929 if (!IEM_IS_CANONICAL(uNewPC))
930 return iemRaiseGeneralProtectionFault0(pVCpu);
931
932 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
933 if (rcStrict != VINF_SUCCESS)
934 return rcStrict;
935
936 pVCpu->cpum.GstCtx.rip = uNewPC;
937 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
938
939#ifndef IEM_WITH_CODE_TLB
940 /* Flush the prefetch buffer. */
941 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
942#endif
943 return VINF_SUCCESS;
944}
945
946
947/**
948 * Implements a 64-bit relative call.
949 *
950 * @param offDisp The displacement offset.
951 */
952IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
953{
954 uint64_t uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
955 uint64_t uNewPC = uOldPC + offDisp;
956 if (!IEM_IS_CANONICAL(uNewPC))
957 return iemRaiseNotCanonical(pVCpu);
958
959 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
960 if (rcStrict != VINF_SUCCESS)
961 return rcStrict;
962
963 pVCpu->cpum.GstCtx.rip = uNewPC;
964 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
965
966#ifndef IEM_WITH_CODE_TLB
967 /* Flush the prefetch buffer. */
968 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
969#endif
970
971 return VINF_SUCCESS;
972}
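

/*
 * The six near call variants above all follow the same pattern: compute the
 * return address as IP + cbInstr, validate the target (CS.LIMIT for 16/32-bit
 * code, canonical form for 64-bit), push the return address so that a stack
 * fault aborts the instruction without side effects, then load RIP, clear
 * EFLAGS.RF and, when no code TLB is used, flush the opcode prefetch buffer.
 */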
973
974
975/**
976 * Implements far jumps and calls thru task segments (TSS).
977 *
978 * @param uSel The selector.
979 * @param enmBranch The kind of branching we're performing.
980 * @param enmEffOpSize The effective operand size.
981 * @param pDesc The descriptor corresponding to @a uSel. The type is
982 * task segment (TSS).
983 */
984IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
985{
986#ifndef IEM_IMPLEMENTS_TASKSWITCH
987 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
988#else
989 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
990 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
991 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
992 RT_NOREF_PV(enmEffOpSize);
993 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
994
995 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
996 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
997 {
998 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
999 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1000 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1001 }
1002
1003 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1004 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1005 * checked here, need testcases. */
1006 if (!pDesc->Legacy.Gen.u1Present)
1007 {
1008 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
1009 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1010 }
1011
1012 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1013 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1014 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
1015#endif
1016}
1017
1018
1019/**
1020 * Implements far jumps and calls thru task gates.
1021 *
1022 * @param uSel The selector.
1023 * @param enmBranch The kind of branching we're performing.
1024 * @param enmEffOpSize The effective operand size.
1025 * @param pDesc The descriptor corresponding to @a uSel. The type is
1026 * task gate.
1027 */
1028IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1029{
1030#ifndef IEM_IMPLEMENTS_TASKSWITCH
1031 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1032#else
1033 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1034 RT_NOREF_PV(enmEffOpSize);
1035 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1036
1037 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1038 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1039 {
1040 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1041 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1042 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1043 }
1044
1045 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1046 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1047 * checked here, need testcases. */
1048 if (!pDesc->Legacy.Gen.u1Present)
1049 {
1050 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
1051 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1052 }
1053
1054 /*
1055 * Fetch the new TSS descriptor from the GDT.
1056 */
1057 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1058 if (uSelTss & X86_SEL_LDT)
1059 {
1060 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1061 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1062 }
1063
1064 IEMSELDESC TssDesc;
1065 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1066 if (rcStrict != VINF_SUCCESS)
1067 return rcStrict;
1068
1069 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1070 {
1071 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1072 TssDesc.Legacy.Gate.u4Type));
1073 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1074 }
1075
1076 if (!TssDesc.Legacy.Gate.u1Present)
1077 {
1078 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1079 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1080 }
1081
1082 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1083 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1084 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1085#endif
1086}
1087
1088
1089/**
1090 * Implements far jumps and calls thru call gates.
1091 *
1092 * @param uSel The selector.
1093 * @param enmBranch The kind of branching we're performing.
1094 * @param enmEffOpSize The effective operand size.
1095 * @param pDesc The descriptor corresponding to @a uSel. The type is
1096 * call gate.
1097 */
1098IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1099{
1100#define IEM_IMPLEMENTS_CALLGATE
1101#ifndef IEM_IMPLEMENTS_CALLGATE
1102 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1103#else
1104 RT_NOREF_PV(enmEffOpSize);
1105 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1106
1107 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1108 * inter-privilege calls and are much more complex.
1109 *
1110 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1111 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1112 * must be 16-bit or 32-bit.
1113 */
1114 /** @todo effective operand size is probably irrelevant here, only the
1115 * call gate bitness matters??
1116 */
1117 VBOXSTRICTRC rcStrict;
1118 RTPTRUNION uPtrRet;
1119 uint64_t uNewRsp;
1120 uint64_t uNewRip;
1121 uint64_t u64Base;
1122 uint32_t cbLimit;
1123 RTSEL uNewCS;
1124 IEMSELDESC DescCS;
1125
1126 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1127 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1128 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1129 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1130
1131 /* Determine the new instruction pointer from the gate descriptor. */
1132 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1133 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1134 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
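    /* (A long mode gate is 16 bytes and supplies bits 63:32 of the target in
       u32OffsetTop; legacy 16/32-bit gates only provide the low word(s), and
       the 16-bit case is truncated further down.) */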
1135
1136 /* Perform DPL checks on the gate descriptor. */
1137 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1138 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1139 {
1140 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1141 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1142 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1143 }
1144
1145 /** @todo does this catch NULL selectors, too? */
1146 if (!pDesc->Legacy.Gen.u1Present)
1147 {
1148 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1149 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1150 }
1151
1152 /*
1153 * Fetch the target CS descriptor from the GDT or LDT.
1154 */
1155 uNewCS = pDesc->Legacy.Gate.u16Sel;
1156 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1157 if (rcStrict != VINF_SUCCESS)
1158 return rcStrict;
1159
1160 /* Target CS must be a code selector. */
1161 if ( !DescCS.Legacy.Gen.u1DescType
1162 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1163 {
1164 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1165 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1166 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1167 }
1168
1169 /* Privilege checks on target CS. */
1170 if (enmBranch == IEMBRANCH_JUMP)
1171 {
1172 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1173 {
1174 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1175 {
1176 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1177 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1178 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1179 }
1180 }
1181 else
1182 {
1183 if (DescCS.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
1184 {
1185 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1186 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1187 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1188 }
1189 }
1190 }
1191 else
1192 {
1193 Assert(enmBranch == IEMBRANCH_CALL);
1194 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1195 {
1196 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1197 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1198 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1199 }
1200 }
1201
1202 /* Additional long mode checks. */
1203 if (IEM_IS_LONG_MODE(pVCpu))
1204 {
1205 if (!DescCS.Legacy.Gen.u1Long)
1206 {
1207 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1208 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1209 }
1210
1211 /* L vs D. */
1212 if ( DescCS.Legacy.Gen.u1Long
1213 && DescCS.Legacy.Gen.u1DefBig)
1214 {
1215 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1216 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1217 }
1218 }
1219
1220 if (!DescCS.Legacy.Gate.u1Present)
1221 {
1222 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1223 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1224 }
1225
1226 if (enmBranch == IEMBRANCH_JUMP)
1227 {
1228 /** @todo This is very similar to regular far jumps; merge! */
1229 /* Jumps are fairly simple... */
1230
1231 /* Chop the high bits off if 16-bit gate (Intel says so). */
1232 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1233 uNewRip = (uint16_t)uNewRip;
1234
1235 /* Limit check for non-long segments. */
1236 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1237 if (DescCS.Legacy.Gen.u1Long)
1238 u64Base = 0;
1239 else
1240 {
1241 if (uNewRip > cbLimit)
1242 {
1243 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1244 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1245 }
1246 u64Base = X86DESC_BASE(&DescCS.Legacy);
1247 }
1248
1249 /* Canonical address check. */
1250 if (!IEM_IS_CANONICAL(uNewRip))
1251 {
1252 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1253 return iemRaiseNotCanonical(pVCpu);
1254 }
1255
1256 /*
1257 * Ok, everything checked out fine. Now set the accessed bit before
1258 * committing the result into CS, CSHID and RIP.
1259 */
1260 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1261 {
1262 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1263 if (rcStrict != VINF_SUCCESS)
1264 return rcStrict;
1265 /** @todo check what VT-x and AMD-V do. */
1266 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1267 }
1268
1269 /* commit */
1270 pVCpu->cpum.GstCtx.rip = uNewRip;
1271 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1272 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1273 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1274 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1275 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1276 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1277 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1278 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1279 }
1280 else
1281 {
1282 Assert(enmBranch == IEMBRANCH_CALL);
1283 /* Calls are much more complicated. */
1284
1285 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < pVCpu->iem.s.uCpl))
1286 {
1287 uint16_t offNewStack; /* Offset of new stack in TSS. */
1288 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1289 uint8_t uNewCSDpl;
1290 uint8_t cbWords;
1291 RTSEL uNewSS;
1292 RTSEL uOldSS;
1293 uint64_t uOldRsp;
1294 IEMSELDESC DescSS;
1295 RTPTRUNION uPtrTSS;
1296 RTGCPTR GCPtrTSS;
1297 RTPTRUNION uPtrParmWds;
1298 RTGCPTR GCPtrParmWds;
1299
1300 /* More privilege. This is the fun part. */
1301 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1302
1303 /*
1304 * Determine new SS:rSP from the TSS.
1305 */
1306 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
1307
1308 /* Figure out where the new stack pointer is stored in the TSS. */
1309 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1310 if (!IEM_IS_LONG_MODE(pVCpu))
1311 {
1312 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1313 {
1314 offNewStack = RT_UOFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1315 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1316 }
1317 else
1318 {
1319 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1320 offNewStack = RT_UOFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1321 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1322 }
1323 }
1324 else
1325 {
1326 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1327 offNewStack = RT_UOFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1328 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1329 }
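            /* Note: a 32-bit TSS stores an 8 byte SS:ESP pair per privilege
               level starting at esp0; a 16-bit TSS stores 4 byte SS:SP pairs
               starting at sp0; a 64-bit TSS only stores an 8 byte RSP per
               level (rsp0..rsp2), the new SS being a NULL selector with
               RPL = new CPL (see below). */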
1330
1331 /* Check against TSS limit. */
1332 if ((uint16_t)(offNewStack + cbNewStack - 1) > pVCpu->cpum.GstCtx.tr.u32Limit)
1333 {
1334 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pVCpu->cpum.GstCtx.tr.u32Limit));
1335 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pVCpu->cpum.GstCtx.tr.Sel);
1336 }
1337
1338 GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
1339 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R, 0);
1340 if (rcStrict != VINF_SUCCESS)
1341 {
1342 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1343 return rcStrict;
1344 }
1345
1346 if (!IEM_IS_LONG_MODE(pVCpu))
1347 {
1348 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1349 {
1350 uNewRsp = uPtrTSS.pu32[0];
1351 uNewSS = uPtrTSS.pu16[2];
1352 }
1353 else
1354 {
1355 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1356 uNewRsp = uPtrTSS.pu16[0];
1357 uNewSS = uPtrTSS.pu16[1];
1358 }
1359 }
1360 else
1361 {
1362 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1363 /* SS will be a NULL selector, but that's valid. */
1364 uNewRsp = uPtrTSS.pu64[0];
1365 uNewSS = uNewCSDpl;
1366 }
1367
1368 /* Done with the TSS now. */
1369 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1370 if (rcStrict != VINF_SUCCESS)
1371 {
1372 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1373 return rcStrict;
1374 }
1375
1376 /* Only used outside of long mode. */
1377 cbWords = pDesc->Legacy.Gate.u5ParmCount;
1378
1379 /* If EFER.LMA is 0, there's extra work to do. */
1380 if (!IEM_IS_LONG_MODE(pVCpu))
1381 {
1382 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1383 {
1384 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1385 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1386 }
1387
1388 /* Grab the new SS descriptor. */
1389 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1390 if (rcStrict != VINF_SUCCESS)
1391 return rcStrict;
1392
1393 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1394 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1395 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1396 {
1397 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1398 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1399 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1400 }
1401
1402 /* Ensure new SS is a writable data segment. */
1403 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1404 {
1405 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1406 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1407 }
1408
1409 if (!DescSS.Legacy.Gen.u1Present)
1410 {
1411 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1412 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1413 }
1414 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1415 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1416 else
1417 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1418 }
1419 else
1420 {
1421 /* Just grab the new (NULL) SS descriptor. */
1422 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1423 * like we do... */
1424 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1425 if (rcStrict != VINF_SUCCESS)
1426 return rcStrict;
1427
1428 cbNewStack = sizeof(uint64_t) * 4;
1429 }
1430
1431 /** @todo According to Intel, new stack is checked for enough space first,
1432 * then switched. According to AMD, the stack is switched first and
1433 * then pushes might fault!
1434 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1435 * incoming stack \#PF happens before actual stack switch. AMD is
1436 * either lying or implicitly assumes that new state is committed
1437 * only if and when an instruction doesn't fault.
1438 */
1439
1440 /** @todo According to AMD, CS is loaded first, then SS.
1441 * According to Intel, it's the other way around!?
1442 */
1443
1444 /** @todo Intel and AMD disagree on when exactly the CPL changes! */
1445
1446 /* Set the accessed bit before committing new SS. */
1447 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1448 {
1449 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1450 if (rcStrict != VINF_SUCCESS)
1451 return rcStrict;
1452 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1453 }
1454
1455 /* Remember the old SS:rSP and their linear address. */
1456 uOldSS = pVCpu->cpum.GstCtx.ss.Sel;
1457 uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
1458
1459 GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
1460
1461 /* HACK ALERT! Probe whether the write to the new stack will succeed. It may raise #SS(NewSS)
1462 or #PF; the former is not implemented in this workaround. */
1463 /** @todo Proper fix callgate target stack exceptions. */
1464 /** @todo testcase: Cover callgates with partially or fully inaccessible
1465 * target stacks. */
1466 void *pvNewFrame;
1467 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1468 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);
1469 if (rcStrict != VINF_SUCCESS)
1470 {
1471 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1472 return rcStrict;
1473 }
1474 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
1475 if (rcStrict != VINF_SUCCESS)
1476 {
1477 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1478 return rcStrict;
1479 }
1480
1481 /* Commit new SS:rSP. */
1482 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1483 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1484 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1485 pVCpu->cpum.GstCtx.ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1486 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1487 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1488 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1489 pVCpu->iem.s.uCpl = uNewCSDpl; /** @todo are the parameter words accessed using the new CPL or the old CPL? */
1490 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1491 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1492
1493 /* At this point the stack access must not fail because new state was already committed. */
1494 /** @todo this can still fail because SS.LIMIT is not checked. */
1495 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1496 IEM_IS_LONG_MODE(pVCpu) ? 7
1497 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
1498 &uPtrRet.pv, &uNewRsp);
1499 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1500 VERR_INTERNAL_ERROR_5);
1501
1502 if (!IEM_IS_LONG_MODE(pVCpu))
1503 {
1504 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1505 {
1506 if (cbWords)
1507 {
1508 /* Map the relevant chunk of the old stack. */
1509 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds,
1510 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1511 if (rcStrict != VINF_SUCCESS)
1512 {
1513 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1514 return rcStrict;
1515 }
1516
1517 /* Copy the parameter (d)words. */
1518 for (int i = 0; i < cbWords; ++i)
1519 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1520
1521 /* Unmap the old stack. */
1522 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1523 if (rcStrict != VINF_SUCCESS)
1524 {
1525 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1526 return rcStrict;
1527 }
1528 }
1529
1530 /* Push the old CS:rIP. */
1531 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1532 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1533
1534 /* Push the old SS:rSP. */
1535 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1536 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
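                    /* The mapped return frame now contains, from the new
                       (lower) ESP upwards: [0]=return EIP, [1]=old CS,
                       [2..2+cbWords-1]=copied parameter dwords, then the
                       caller's ESP and SS.  The 16-bit gate branch below
                       builds the same layout using words. */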
1537 }
1538 else
1539 {
1540 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1541
1542 if (cbWords)
1543 {
1544 /* Map the relevant chunk of the old stack. */
1545 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds,
1546 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1547 if (rcStrict != VINF_SUCCESS)
1548 {
1549 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1550 return rcStrict;
1551 }
1552
1553 /* Copy the parameter words. */
1554 for (int i = 0; i < cbWords; ++i)
1555 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1556
1557 /* Unmap the old stack. */
1558 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1559 if (rcStrict != VINF_SUCCESS)
1560 {
1561 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1562 return rcStrict;
1563 }
1564 }
1565
1566 /* Push the old CS:rIP. */
1567 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1568 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1569
1570 /* Push the old SS:rSP. */
1571 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1572 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1573 }
1574 }
1575 else
1576 {
1577 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1578
1579 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1580 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1581 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1582 uPtrRet.pu64[2] = uOldRsp;
1583 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1584 }
1585
1586 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1587 if (rcStrict != VINF_SUCCESS)
1588 {
1589 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1590 return rcStrict;
1591 }
1592
1593 /* Chop the high bits off if 16-bit gate (Intel says so). */
1594 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1595 uNewRip = (uint16_t)uNewRip;
1596
1597 /* Limit / canonical check. */
1598 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1599 if (!IEM_IS_LONG_MODE(pVCpu))
1600 {
1601 if (uNewRip > cbLimit)
1602 {
1603 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1604 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1605 }
1606 u64Base = X86DESC_BASE(&DescCS.Legacy);
1607 }
1608 else
1609 {
1610 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1611 if (!IEM_IS_CANONICAL(uNewRip))
1612 {
1613 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1614 return iemRaiseNotCanonical(pVCpu);
1615 }
1616 u64Base = 0;
1617 }
1618
1619 /*
1620 * Now set the accessed bit before committing the result into CS,
1621 * CSHID and RIP. (The return address was already pushed and
1622 * committed to the new stack above.)
1623 */
1624 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1625 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1626 {
1627 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1628 if (rcStrict != VINF_SUCCESS)
1629 return rcStrict;
1630 /** @todo check what VT-x and AMD-V does. */
1631 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1632 }
1633
1634 /* Commit new CS:rIP. */
1635 pVCpu->cpum.GstCtx.rip = uNewRip;
1636 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1637 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
1638 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1639 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1640 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1641 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1642 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1643 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1644 }
1645 else
1646 {
1647 /* Same privilege. */
1648 /** @todo This is very similar to regular far calls; merge! */
1649
1650 /* Check stack first - may #SS(0). */
1651 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1652 * 16-bit code cause a two or four byte CS to be pushed? */
1653 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1654 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1655 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1656 IEM_IS_LONG_MODE(pVCpu) ? 7
1657 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 2,
1658 &uPtrRet.pv, &uNewRsp);
1659 if (rcStrict != VINF_SUCCESS)
1660 return rcStrict;
1661
1662 /* Chop the high bits off if 16-bit gate (Intel says so). */
1663 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1664 uNewRip = (uint16_t)uNewRip;
1665
1666 /* Limit / canonical check. */
1667 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1668 if (!IEM_IS_LONG_MODE(pVCpu))
1669 {
1670 if (uNewRip > cbLimit)
1671 {
1672 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1673 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1674 }
1675 u64Base = X86DESC_BASE(&DescCS.Legacy);
1676 }
1677 else
1678 {
1679 if (!IEM_IS_CANONICAL(uNewRip))
1680 {
1681 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1682 return iemRaiseNotCanonical(pVCpu);
1683 }
1684 u64Base = 0;
1685 }
1686
1687 /*
1688 * Now set the accessed bit before
1689 * writing the return address to the stack and committing the result into
1690 * CS, CSHID and RIP.
1691 */
1692 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1693 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1694 {
1695 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1696 if (rcStrict != VINF_SUCCESS)
1697 return rcStrict;
1698 /** @todo check what VT-x and AMD-V does. */
1699 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1700 }
1701
1702 /* stack */
1703 if (!IEM_IS_LONG_MODE(pVCpu))
1704 {
1705 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1706 {
1707 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1708 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1709 }
1710 else
1711 {
1712 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1713 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1714 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1715 }
1716 }
1717 else
1718 {
1719 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1720 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1721 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1722 }
1723
1724 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1725 if (rcStrict != VINF_SUCCESS)
1726 return rcStrict;
1727
1728 /* commit */
1729 pVCpu->cpum.GstCtx.rip = uNewRip;
1730 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1731 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
1732 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1733 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1734 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1735 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1736 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1737 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1738 }
1739 }
1740 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1741
1742 /* Flush the prefetch buffer. */
1743# ifdef IEM_WITH_CODE_TLB
1744 pVCpu->iem.s.pbInstrBuf = NULL;
1745# else
1746 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1747# endif
1748 return VINF_SUCCESS;
1749#endif
1750}
1751
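/*
 * For reference, the new-stack frame built by the more-privileged call gate
 * path above (element size 2 or 4 bytes for legacy gates, 8 bytes for 64-bit
 * gates), which is what the "2 + cbWords" indexing refers to:
 *
 *      [0]             old IP/EIP/RIP (the return address)
 *      [1]             old CS
 *      [2 .. 2+n-1]    n parameter (d)words copied from the old stack
 *                      (legacy gates only, n = cbWords)
 *      [2+n]           old SP/ESP/RSP
 *      [2+n+1]         old SS
 */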
1752
1753/**
1754 * Implements far jumps and calls thru system selectors.
1755 *
1756 * @param uSel The selector.
1757 * @param enmBranch The kind of branching we're performing.
1758 * @param enmEffOpSize The effective operand size.
1759 * @param pDesc The descriptor corresponding to @a uSel.
1760 */
1761IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1762{
1763 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1764 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1765 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1766
1767 if (IEM_IS_LONG_MODE(pVCpu))
1768 switch (pDesc->Legacy.Gen.u4Type)
1769 {
1770 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1771 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1772
1773 default:
1774 case AMD64_SEL_TYPE_SYS_LDT:
1775 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1776 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1777 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1778 case AMD64_SEL_TYPE_SYS_INT_GATE:
1779 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1780 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1781 }
1782
1783 switch (pDesc->Legacy.Gen.u4Type)
1784 {
1785 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1786 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1787 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1788
1789 case X86_SEL_TYPE_SYS_TASK_GATE:
1790 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1791
1792 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1793 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1794 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1795
1796 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1797 Log(("branch %04x -> busy 286 TSS\n", uSel));
1798 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1799
1800 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1801 Log(("branch %04x -> busy 386 TSS\n", uSel));
1802 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1803
1804 default:
1805 case X86_SEL_TYPE_SYS_LDT:
1806 case X86_SEL_TYPE_SYS_286_INT_GATE:
1807 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1808 case X86_SEL_TYPE_SYS_386_INT_GATE:
1809 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1810 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1811 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1812 }
1813}
1814
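/*
 * In real and V8086 mode the far jump below roughly amounts to the sketch
 * here; all the descriptor parsing only applies to the protected mode path:
 *
 *      if (offSeg > CS.limit)      // the limit of the *current* CS
 *          raise #GP(0);
 *      CS      = uSel;
 *      CS.base = uSel << 4;
 *      IP/EIP  = offSeg;           // see the operand size todo below
 */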
1815
1816/**
1817 * Implements far jumps.
1818 *
1819 * @param uSel The selector.
1820 * @param offSeg The segment offset.
1821 * @param enmEffOpSize The effective operand size.
1822 */
1823IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1824{
1825 NOREF(cbInstr);
1826 Assert(offSeg <= UINT32_MAX);
1827
1828 /*
1829 * Real mode and V8086 mode are easy. The only snag seems to be that
1830 * CS.limit doesn't change and the limit check is done against the current
1831 * limit.
1832 */
1833 /** @todo Robert Collins claims (The Segment Descriptor Cache, DDJ August
1834 * 1998) that up to and including the Intel 486, far control
1835 * transfers in real mode set default CS attributes (0x93) and also
1836 * set a 64K segment limit. Starting with the Pentium, the
1837 * attributes and limit are left alone but the access rights are
1838 * ignored. We only implement the Pentium+ behavior.
1839 * */
1840 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1841 {
1842 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1843 if (offSeg > pVCpu->cpum.GstCtx.cs.u32Limit)
1844 {
1845 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1846 return iemRaiseGeneralProtectionFault0(pVCpu);
1847 }
1848
1849 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1850 pVCpu->cpum.GstCtx.rip = offSeg;
1851 else
1852 pVCpu->cpum.GstCtx.rip = offSeg & UINT16_MAX;
1853 pVCpu->cpum.GstCtx.cs.Sel = uSel;
1854 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
1855 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1856 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
1857 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1858 return VINF_SUCCESS;
1859 }
1860
1861 /*
1862 * Protected mode. Need to parse the specified descriptor...
1863 */
1864 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1865 {
1866 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1867 return iemRaiseGeneralProtectionFault0(pVCpu);
1868 }
1869
1870 /* Fetch the descriptor. */
1871 IEMSELDESC Desc;
1872 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1873 if (rcStrict != VINF_SUCCESS)
1874 return rcStrict;
1875
1876 /* Is it there? */
1877 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1878 {
1879 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1880 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1881 }
1882
1883 /*
1884 * Deal with it according to its type. We do the standard code selectors
1885 * here and dispatch the system selectors to worker functions.
1886 */
1887 if (!Desc.Legacy.Gen.u1DescType)
1888 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1889
1890 /* Only code segments. */
1891 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1892 {
1893 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1894 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1895 }
1896
1897 /* L vs D. */
1898 if ( Desc.Legacy.Gen.u1Long
1899 && Desc.Legacy.Gen.u1DefBig
1900 && IEM_IS_LONG_MODE(pVCpu))
1901 {
1902 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1903 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1904 }
1905
1906 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1907 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1908 {
1909 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
1910 {
1911 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1912 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1913 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1914 }
1915 }
1916 else
1917 {
1918 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
1919 {
1920 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1921 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1922 }
1923 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
1924 {
1925 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
1926 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1927 }
1928 }
1929
1930 /* Chop the high bits if 16-bit (Intel says so). */
1931 if (enmEffOpSize == IEMMODE_16BIT)
1932 offSeg &= UINT16_MAX;
1933
1934 /* Limit check. (Should alternatively check for non-canonical addresses
1935 here, but that is ruled out by offSeg being 32-bit, right?) */
1936 uint64_t u64Base;
1937 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1938 if (Desc.Legacy.Gen.u1Long)
1939 u64Base = 0;
1940 else
1941 {
1942 if (offSeg > cbLimit)
1943 {
1944 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1945 /** @todo Intel says this is \#GP(0)! */
1946 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1947 }
1948 u64Base = X86DESC_BASE(&Desc.Legacy);
1949 }
1950
1951 /*
1952 * Ok, everything checked out fine. Now set the accessed bit before
1953 * committing the result into CS, CSHID and RIP.
1954 */
1955 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1956 {
1957 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1958 if (rcStrict != VINF_SUCCESS)
1959 return rcStrict;
1960 /** @todo check what VT-x and AMD-V does. */
1961 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1962 }
1963
1964 /* commit */
1965 pVCpu->cpum.GstCtx.rip = offSeg;
1966 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1967 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1968 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1969 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1970 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1971 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1972 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1973 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1974 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1975 /** @todo check if the hidden bits are loaded correctly for 64-bit
1976 * mode. */
1977
1978 /* Flush the prefetch buffer. */
1979#ifdef IEM_WITH_CODE_TLB
1980 pVCpu->iem.s.pbInstrBuf = NULL;
1981#else
1982 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1983#endif
1984
1985 return VINF_SUCCESS;
1986}
1987
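/*
 * Rough sketch of the return frame a far call pushes, which is where the
 * 2+2 / 4+4 / 8+8 byte counts and 1 / 3 / 7 alignment masks passed to
 * iemMemStackPushBeginSpecial below come from (SP = stack pointer after the
 * push):
 *
 *      16-bit:  [SP+0] = IP of the next instruction,  [SP+2] = CS
 *      32-bit:  [SP+0] = EIP,                         [SP+4] = CS
 *      64-bit:  [SP+0] = RIP,                         [SP+8] = CS
 *
 * What ends up in the bytes above the 16-bit selector value is still an open
 * testcase question, see the todos below.
 */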
1988
1989/**
1990 * Implements far calls.
1991 *
1992 * This is very similar to iemCImpl_FarJmp.
1993 *
1994 * @param uSel The selector.
1995 * @param offSeg The segment offset.
1996 * @param enmEffOpSize The operand size (in case we need it).
1997 */
1998IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1999{
2000 VBOXSTRICTRC rcStrict;
2001 uint64_t uNewRsp;
2002 RTPTRUNION uPtrRet;
2003
2004 /*
2005 * Real mode and V8086 mode are easy. The only snag seems to be that
2006 * CS.limit doesn't change and the limit check is done against the current
2007 * limit.
2008 */
2009 /** @todo See comment for similar code in iemCImpl_FarJmp */
2010 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2011 {
2012 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
2013
2014 /* Check stack first - may #SS(0). */
2015 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2016 enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2017 &uPtrRet.pv, &uNewRsp);
2018 if (rcStrict != VINF_SUCCESS)
2019 return rcStrict;
2020
2021 /* Check the target address range. */
2022 if (offSeg > UINT32_MAX)
2023 return iemRaiseGeneralProtectionFault0(pVCpu);
2024
2025 /* Everything is fine, push the return address. */
2026 if (enmEffOpSize == IEMMODE_16BIT)
2027 {
2028 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2029 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2030 }
2031 else
2032 {
2033 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2034 uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel;
2035 }
2036 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2037 if (rcStrict != VINF_SUCCESS)
2038 return rcStrict;
2039
2040 /* Branch. */
2041 pVCpu->cpum.GstCtx.rip = offSeg;
2042 pVCpu->cpum.GstCtx.cs.Sel = uSel;
2043 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
2044 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2045 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
2046 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2047 return VINF_SUCCESS;
2048 }
2049
2050 /*
2051 * Protected mode. Need to parse the specified descriptor...
2052 */
2053 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2054 {
2055 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2056 return iemRaiseGeneralProtectionFault0(pVCpu);
2057 }
2058
2059 /* Fetch the descriptor. */
2060 IEMSELDESC Desc;
2061 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2062 if (rcStrict != VINF_SUCCESS)
2063 return rcStrict;
2064
2065 /*
2066 * Deal with it according to its type. We do the standard code selectors
2067 * here and dispatch the system selectors to worker functions.
2068 */
2069 if (!Desc.Legacy.Gen.u1DescType)
2070 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2071
2072 /* Only code segments. */
2073 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2074 {
2075 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2076 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2077 }
2078
2079 /* L vs D. */
2080 if ( Desc.Legacy.Gen.u1Long
2081 && Desc.Legacy.Gen.u1DefBig
2082 && IEM_IS_LONG_MODE(pVCpu))
2083 {
2084 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2085 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2086 }
2087
2088 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
2089 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2090 {
2091 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
2092 {
2093 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2094 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2095 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2096 }
2097 }
2098 else
2099 {
2100 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
2101 {
2102 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2103 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2104 }
2105 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
2106 {
2107 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
2108 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2109 }
2110 }
2111
2112 /* Is it there? */
2113 if (!Desc.Legacy.Gen.u1Present)
2114 {
2115 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2116 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2117 }
2118
2119 /* Check stack first - may #SS(0). */
2120 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2121 * 16-bit code cause a two or four byte CS to be pushed? */
2122 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2123 enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2124 enmEffOpSize == IEMMODE_64BIT ? 7 : enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2125 &uPtrRet.pv, &uNewRsp);
2126 if (rcStrict != VINF_SUCCESS)
2127 return rcStrict;
2128
2129 /* Chop the high bits if 16-bit (Intel says so). */
2130 if (enmEffOpSize == IEMMODE_16BIT)
2131 offSeg &= UINT16_MAX;
2132
2133 /* Limit / canonical check. */
2134 uint64_t u64Base;
2135 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2136 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2137 {
2138 if (!IEM_IS_CANONICAL(offSeg))
2139 {
2140 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2141 return iemRaiseNotCanonical(pVCpu);
2142 }
2143 u64Base = 0;
2144 }
2145 else
2146 {
2147 if (offSeg > cbLimit)
2148 {
2149 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2150 /** @todo Intel says this is \#GP(0)! */
2151 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2152 }
2153 u64Base = X86DESC_BASE(&Desc.Legacy);
2154 }
2155
2156 /*
2157 * Now set the accessed bit before
2158 * writing the return address to the stack and committing the result into
2159 * CS, CSHID and RIP.
2160 */
2161 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2162 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2163 {
2164 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2165 if (rcStrict != VINF_SUCCESS)
2166 return rcStrict;
2167 /** @todo check what VT-x and AMD-V does. */
2168 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2169 }
2170
2171 /* stack */
2172 if (enmEffOpSize == IEMMODE_16BIT)
2173 {
2174 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2175 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2176 }
2177 else if (enmEffOpSize == IEMMODE_32BIT)
2178 {
2179 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2180 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2181 }
2182 else
2183 {
2184 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
2185 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2186 }
2187 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2188 if (rcStrict != VINF_SUCCESS)
2189 return rcStrict;
2190
2191 /* commit */
2192 pVCpu->cpum.GstCtx.rip = offSeg;
2193 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2194 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
2195 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
2196 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2197 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2198 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2199 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2200 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2201 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2202 /** @todo check if the hidden bits are loaded correctly for 64-bit
2203 * mode. */
2204
2205 /* Flush the prefetch buffer. */
2206#ifdef IEM_WITH_CODE_TLB
2207 pVCpu->iem.s.pbInstrBuf = NULL;
2208#else
2209 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2210#endif
2211 return VINF_SUCCESS;
2212}
2213
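/*
 * Rough sketch of the stack frame the retf implementation below consumes
 * (element size 2/4/8 bytes by operand size, offsets relative to RSP on
 * entry):
 *
 *      [0]                 return IP/EIP/RIP
 *      [1]                 return CS
 *      ...                 cbPop bytes of callee parameters (skipped, not read)
 *      [after the params]  outer SP/ESP/RSP  \  only consumed when returning
 *      [following that]    outer SS          /  to an outer privilege level
 *
 * This is why the outer-privilege path re-reads the stack with
 * iemMemStackPopContinueSpecial using cbPop as the offset.
 */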
2214
2215/**
2216 * Implements retf.
2217 *
2218 * @param enmEffOpSize The effective operand size.
2219 * @param cbPop The number of argument bytes to pop from
2220 * the stack.
2221 */
2222IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2223{
2224 VBOXSTRICTRC rcStrict;
2225 RTCPTRUNION uPtrFrame;
2226 uint64_t uNewRsp;
2227 uint64_t uNewRip;
2228 uint16_t uNewCs;
2229 NOREF(cbInstr);
2230
2231 /*
2232 * Read the stack values first.
2233 */
2234 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2235 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2236 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,
2237 enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,
2238 &uPtrFrame.pv, &uNewRsp);
2239 if (rcStrict != VINF_SUCCESS)
2240 return rcStrict;
2241 if (enmEffOpSize == IEMMODE_16BIT)
2242 {
2243 uNewRip = uPtrFrame.pu16[0];
2244 uNewCs = uPtrFrame.pu16[1];
2245 }
2246 else if (enmEffOpSize == IEMMODE_32BIT)
2247 {
2248 uNewRip = uPtrFrame.pu32[0];
2249 uNewCs = uPtrFrame.pu16[2];
2250 }
2251 else
2252 {
2253 uNewRip = uPtrFrame.pu64[0];
2254 uNewCs = uPtrFrame.pu16[4];
2255 }
2256 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2257 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2258 { /* extremely likely */ }
2259 else
2260 return rcStrict;
2261
2262 /*
2263 * Real mode and V8086 mode are easy.
2264 */
2265 /** @todo See comment for similar code in iemCImpl_FarJmp */
2266 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2267 {
2268 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2269 /** @todo check how this is supposed to work if sp=0xfffe. */
2270
2271 /* Check the limit of the new EIP. */
2272 /** @todo Intel pseudo code only does the limit check for 16-bit
2273 * operands, AMD does not make any distinction. What is right? */
2274 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
2275 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2276
2277 /* commit the operation. */
2278 pVCpu->cpum.GstCtx.rsp = uNewRsp;
2279 pVCpu->cpum.GstCtx.rip = uNewRip;
2280 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2281 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2282 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2283 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2284 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2285 if (cbPop)
2286 iemRegAddToRsp(pVCpu, cbPop);
2287 return VINF_SUCCESS;
2288 }
2289
2290 /*
2291 * Protected mode is complicated, of course.
2292 */
2293 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2294 {
2295 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2296 return iemRaiseGeneralProtectionFault0(pVCpu);
2297 }
2298
2299 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2300
2301 /* Fetch the descriptor. */
2302 IEMSELDESC DescCs;
2303 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2304 if (rcStrict != VINF_SUCCESS)
2305 return rcStrict;
2306
2307 /* Can only return to a code selector. */
2308 if ( !DescCs.Legacy.Gen.u1DescType
2309 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2310 {
2311 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2312 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2313 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2314 }
2315
2316 /* L vs D. */
2317 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2318 && DescCs.Legacy.Gen.u1DefBig
2319 && IEM_IS_LONG_MODE(pVCpu))
2320 {
2321 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2322 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2323 }
2324
2325 /* DPL/RPL/CPL checks. */
2326 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
2327 {
2328 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
2329 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2330 }
2331
2332 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2333 {
2334 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2335 {
2336 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2337 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2338 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2339 }
2340 }
2341 else
2342 {
2343 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2344 {
2345 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2346 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2347 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2348 }
2349 }
2350
2351 /* Is it there? */
2352 if (!DescCs.Legacy.Gen.u1Present)
2353 {
2354 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2355 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2356 }
2357
2358 /*
2359 * Return to outer privilege? (We'll typically have entered via a call gate.)
2360 */
2361 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
2362 {
2363 /* Read the outer stack pointer stored *after* the parameters. */
2364 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, uNewRsp);
2365 if (rcStrict != VINF_SUCCESS)
2366 return rcStrict;
2367
2368 uint16_t uNewOuterSs;
2369 uint64_t uNewOuterRsp;
2370 if (enmEffOpSize == IEMMODE_16BIT)
2371 {
2372 uNewOuterRsp = uPtrFrame.pu16[0];
2373 uNewOuterSs = uPtrFrame.pu16[1];
2374 }
2375 else if (enmEffOpSize == IEMMODE_32BIT)
2376 {
2377 uNewOuterRsp = uPtrFrame.pu32[0];
2378 uNewOuterSs = uPtrFrame.pu16[2];
2379 }
2380 else
2381 {
2382 uNewOuterRsp = uPtrFrame.pu64[0];
2383 uNewOuterSs = uPtrFrame.pu16[4];
2384 }
2385 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2386 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2387 { /* extremely likely */ }
2388 else
2389 return rcStrict;
2390
2391 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2392 and read the selector. */
2393 IEMSELDESC DescSs;
2394 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2395 {
2396 if ( !DescCs.Legacy.Gen.u1Long
2397 || (uNewOuterSs & X86_SEL_RPL) == 3)
2398 {
2399 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2400 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2401 return iemRaiseGeneralProtectionFault0(pVCpu);
2402 }
2403 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2404 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2405 }
2406 else
2407 {
2408 /* Fetch the descriptor for the new stack segment. */
2409 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2410 if (rcStrict != VINF_SUCCESS)
2411 return rcStrict;
2412 }
2413
2414 /* Check that RPL of stack and code selectors match. */
2415 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2416 {
2417 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2418 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2419 }
2420
2421 /* Must be a writable data segment. */
2422 if ( !DescSs.Legacy.Gen.u1DescType
2423 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2424 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2425 {
2426 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2427 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2428 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2429 }
2430
2431 /* L vs D. (Not mentioned by intel.) */
2432 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2433 && DescSs.Legacy.Gen.u1DefBig
2434 && IEM_IS_LONG_MODE(pVCpu))
2435 {
2436 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2437 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2438 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2439 }
2440
2441 /* DPL/RPL/CPL checks. */
2442 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2443 {
2444 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2445 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2446 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2447 }
2448
2449 /* Is it there? */
2450 if (!DescSs.Legacy.Gen.u1Present)
2451 {
2452 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2453 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewOuterSs);
2454 }
2455
2456 /* Calc SS limit. */
2457 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2458
2459 /* Is RIP canonical or within CS.limit? */
2460 uint64_t u64Base;
2461 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2462
2463 /** @todo Testcase: Is this correct? */
2464 if ( DescCs.Legacy.Gen.u1Long
2465 && IEM_IS_LONG_MODE(pVCpu) )
2466 {
2467 if (!IEM_IS_CANONICAL(uNewRip))
2468 {
2469 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2470 return iemRaiseNotCanonical(pVCpu);
2471 }
2472 u64Base = 0;
2473 }
2474 else
2475 {
2476 if (uNewRip > cbLimitCs)
2477 {
2478 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2479 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2480 /** @todo Intel says this is \#GP(0)! */
2481 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2482 }
2483 u64Base = X86DESC_BASE(&DescCs.Legacy);
2484 }
2485
2486 /*
2487 * Now set the accessed bits before committing the result into
2488 * CS, SS, their hidden parts and RIP.
2489 */
2491 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2492 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2493 {
2494 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2495 if (rcStrict != VINF_SUCCESS)
2496 return rcStrict;
2497 /** @todo check what VT-x and AMD-V does. */
2498 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2499 }
2500 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2501 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2502 {
2503 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2504 if (rcStrict != VINF_SUCCESS)
2505 return rcStrict;
2506 /** @todo check what VT-x and AMD-V does. */
2507 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2508 }
2509
2510 /* commit */
2511 if (enmEffOpSize == IEMMODE_16BIT)
2512 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2513 else
2514 pVCpu->cpum.GstCtx.rip = uNewRip;
2515 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2516 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2517 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2518 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2519 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2520 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2521 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2522 pVCpu->cpum.GstCtx.ss.Sel = uNewOuterSs;
2523 pVCpu->cpum.GstCtx.ss.ValidSel = uNewOuterSs;
2524 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2525 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2526 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
2527 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2528 pVCpu->cpum.GstCtx.ss.u64Base = 0;
2529 else
2530 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2531 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2532 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewOuterRsp;
2533 else
2534 pVCpu->cpum.GstCtx.rsp = uNewOuterRsp;
2535
2536 pVCpu->iem.s.uCpl = (uNewCs & X86_SEL_RPL);
2537 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
2538 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
2539 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
2540 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
2541
2542 /** @todo check if the hidden bits are loaded correctly for 64-bit
2543 * mode. */
2544
2545 if (cbPop)
2546 iemRegAddToRsp(pVCpu, cbPop);
2547 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2548
2549 /* Done! */
2550 }
2551 /*
2552 * Return to the same privilege level
2553 */
2554 else
2555 {
2556 /* Limit / canonical check. */
2557 uint64_t u64Base;
2558 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2559
2560 /** @todo Testcase: Is this correct? */
2561 if ( DescCs.Legacy.Gen.u1Long
2562 && IEM_IS_LONG_MODE(pVCpu) )
2563 {
2564 if (!IEM_IS_CANONICAL(uNewRip))
2565 {
2566 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2567 return iemRaiseNotCanonical(pVCpu);
2568 }
2569 u64Base = 0;
2570 }
2571 else
2572 {
2573 if (uNewRip > cbLimitCs)
2574 {
2575 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2576 /** @todo Intel says this is \#GP(0)! */
2577 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2578 }
2579 u64Base = X86DESC_BASE(&DescCs.Legacy);
2580 }
2581
2582 /*
2583 * Now set the accessed bit before committing the result into
2584 * CS, CSHID and RIP.
2585 */
2587 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2588 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2589 {
2590 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2591 if (rcStrict != VINF_SUCCESS)
2592 return rcStrict;
2593 /** @todo check what VT-x and AMD-V does. */
2594 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2595 }
2596
2597 /* commit */
2598 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2599 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
2600 else
2601 pVCpu->cpum.GstCtx.rsp = uNewRsp;
2602 if (enmEffOpSize == IEMMODE_16BIT)
2603 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2604 else
2605 pVCpu->cpum.GstCtx.rip = uNewRip;
2606 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2607 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2608 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2609 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2610 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2611 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2612 /** @todo check if the hidden bits are loaded correctly for 64-bit
2613 * mode. */
2614 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2615 if (cbPop)
2616 iemRegAddToRsp(pVCpu, cbPop);
2617 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2618 }
2619
2620 /* Flush the prefetch buffer. */
2621#ifdef IEM_WITH_CODE_TLB
2622 pVCpu->iem.s.pbInstrBuf = NULL;
2623#else
2624 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2625#endif
2626 return VINF_SUCCESS;
2627}
2628
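/*
 * Loose pseudo-code of the retn implementation below (error paths and the
 * SS.B dependent stack pointer width omitted):
 *
 *      ip   = pop();               // 2/4/8 bytes by operand size
 *      if (ip outside CS.limit / not canonical)
 *          raise #GP;
 *      rsp += cbPop;               // the imm16 of "ret imm16", if any
 *      rip  = ip;
 */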
2629
2630/**
2631 * Implements retn.
2632 *
2633 * We're doing this in C because of the \#GP that might be raised if the popped
2634 * program counter is out of bounds.
2635 *
2636 * @param enmEffOpSize The effective operand size.
2637 * @param cbPop The number of argument bytes to pop from
2638 * the stack.
2639 */
2640IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2641{
2642 NOREF(cbInstr);
2643
2644 /* Fetch the RSP from the stack. */
2645 VBOXSTRICTRC rcStrict;
2646 RTUINT64U NewRip;
2647 RTUINT64U NewRsp;
2648 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2649
2650 switch (enmEffOpSize)
2651 {
2652 case IEMMODE_16BIT:
2653 NewRip.u = 0;
2654 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2655 break;
2656 case IEMMODE_32BIT:
2657 NewRip.u = 0;
2658 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2659 break;
2660 case IEMMODE_64BIT:
2661 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2662 break;
2663 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2664 }
2665 if (rcStrict != VINF_SUCCESS)
2666 return rcStrict;
2667
2668 /* Check the new RSP before loading it. */
2669 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2670 * of it. The canonical test is performed here and for call. */
2671 if (enmEffOpSize != IEMMODE_64BIT)
2672 {
2673 if (NewRip.DWords.dw0 > pVCpu->cpum.GstCtx.cs.u32Limit)
2674 {
2675 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));
2676 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2677 }
2678 }
2679 else
2680 {
2681 if (!IEM_IS_CANONICAL(NewRip.u))
2682 {
2683 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2684 return iemRaiseNotCanonical(pVCpu);
2685 }
2686 }
2687
2688 /* Apply cbPop */
2689 if (cbPop)
2690 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2691
2692 /* Commit it. */
2693 pVCpu->cpum.GstCtx.rip = NewRip.u;
2694 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2695 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2696
2697 /* Flush the prefetch buffer. */
2698#ifndef IEM_WITH_CODE_TLB
2699 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2700#endif
2701
2702 return VINF_SUCCESS;
2703}
2704
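/*
 * Loose pseudo-code of the common u8NestingLevel=0 ENTER case handled below;
 * the nesting-level loop that copies frame pointers is the messy part and is
 * best read directly from the code:
 *
 *      push(rbp);          // 2/4/8 bytes by operand size
 *      rbp  = rsp;         // only the low 16 bits for 16-bit operand size
 *      rsp -= cbFrame;
 */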
2705
2706/**
2707 * Implements enter.
2708 *
2709 * We're doing this in C because the instruction is insane; even for the
2710 * u8NestingLevel=0 case, dealing with the stack is tedious.
2711 *
2712 * @param enmEffOpSize The effective operand size.
2713 * @param cbFrame Frame size.
2714 * @param cParameters Frame parameter count.
2715 */
2716IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2717{
2718 /* Push RBP, saving the old value in TmpRbp. */
2719 RTUINT64U NewRsp; NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2720 RTUINT64U TmpRbp; TmpRbp.u = pVCpu->cpum.GstCtx.rbp;
2721 RTUINT64U NewRbp;
2722 VBOXSTRICTRC rcStrict;
2723 if (enmEffOpSize == IEMMODE_64BIT)
2724 {
2725 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2726 NewRbp = NewRsp;
2727 }
2728 else if (enmEffOpSize == IEMMODE_32BIT)
2729 {
2730 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2731 NewRbp = NewRsp;
2732 }
2733 else
2734 {
2735 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2736 NewRbp = TmpRbp;
2737 NewRbp.Words.w0 = NewRsp.Words.w0;
2738 }
2739 if (rcStrict != VINF_SUCCESS)
2740 return rcStrict;
2741
2742 /* Copy the parameters (aka nesting levels by Intel). */
2743 cParameters &= 0x1f;
2744 if (cParameters > 0)
2745 {
2746 switch (enmEffOpSize)
2747 {
2748 case IEMMODE_16BIT:
2749 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2750 TmpRbp.DWords.dw0 -= 2;
2751 else
2752 TmpRbp.Words.w0 -= 2;
2753 do
2754 {
2755 uint16_t u16Tmp;
2756 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2757 if (rcStrict != VINF_SUCCESS)
2758 break;
2759 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2760 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2761 break;
2762
2763 case IEMMODE_32BIT:
2764 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2765 TmpRbp.DWords.dw0 -= 4;
2766 else
2767 TmpRbp.Words.w0 -= 4;
2768 do
2769 {
2770 uint32_t u32Tmp;
2771 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2772 if (rcStrict != VINF_SUCCESS)
2773 break;
2774 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2775 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2776 break;
2777
2778 case IEMMODE_64BIT:
2779 TmpRbp.u -= 8;
2780 do
2781 {
2782 uint64_t u64Tmp;
2783 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2784 if (rcStrict != VINF_SUCCESS)
2785 break;
2786 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2787 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2788 break;
2789
2790 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2791 }
2792 if (rcStrict != VINF_SUCCESS)
2793 return rcStrict;
2794
2795 /* Push the new RBP */
2796 if (enmEffOpSize == IEMMODE_64BIT)
2797 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2798 else if (enmEffOpSize == IEMMODE_32BIT)
2799 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2800 else
2801 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2802 if (rcStrict != VINF_SUCCESS)
2803 return rcStrict;
2804
2805 }
2806
2807 /* Recalc RSP. */
2808 iemRegSubFromRspEx(pVCpu, &NewRsp, cbFrame);
2809
2810 /** @todo Should probe write access at the new RSP according to AMD. */
2811 /** @todo Should handle accesses to the VMX APIC-access page. */
2812
2813 /* Commit it. */
2814 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2815 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2816 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2817
2818 return VINF_SUCCESS;
2819}
2820
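/*
 * LEAVE is essentially the inverse of the frame setup above; a loose sketch
 * of the implementation below (SS.B wrapping details omitted):
 *
 *      rsp = rbp;          // 64/32/16-bit view depending on mode and SS.B
 *      rbp = pop();        // 2/4/8 bytes by operand size
 */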
2821
2822
2823/**
2824 * Implements leave.
2825 *
2826 * We're doing this in C because messing with the stack registers is annoying
2827 * since they depend on SS attributes.
2828 *
2829 * @param enmEffOpSize The effective operand size.
2830 */
2831IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2832{
2833 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2834 RTUINT64U NewRsp;
2835 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2836 NewRsp.u = pVCpu->cpum.GstCtx.rbp;
2837 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2838 NewRsp.u = pVCpu->cpum.GstCtx.ebp;
2839 else
2840 {
2841 /** @todo Check that LEAVE actually preserves the high EBP bits. */
2842 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2843 NewRsp.Words.w0 = pVCpu->cpum.GstCtx.bp;
2844 }
2845
2846 /* Pop RBP according to the operand size. */
2847 VBOXSTRICTRC rcStrict;
2848 RTUINT64U NewRbp;
2849 switch (enmEffOpSize)
2850 {
2851 case IEMMODE_16BIT:
2852 NewRbp.u = pVCpu->cpum.GstCtx.rbp;
2853 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2854 break;
2855 case IEMMODE_32BIT:
2856 NewRbp.u = 0;
2857 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2858 break;
2859 case IEMMODE_64BIT:
2860 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2861 break;
2862 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2863 }
2864 if (rcStrict != VINF_SUCCESS)
2865 return rcStrict;
2866
2867
2868 /* Commit it. */
2869 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2870 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2871 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2872
2873 return VINF_SUCCESS;
2874}
2875
2876
2877/**
2878 * Implements int3 and int XX.
2879 *
2880 * @param u8Int The interrupt vector number.
2881 * @param enmInt The int instruction type.
2882 */
2883IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2884{
2885 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2886
2887 /*
2888 * We must check if this INT3 might belong to DBGF before raising a #BP.
2889 */
2890 if (u8Int == 3)
2891 {
2892 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2893 if (pVM->dbgf.ro.cEnabledInt3Breakpoints == 0)
2894 { /* likely: No vbox debugger breakpoints */ }
2895 else
2896 {
2897 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
2898 Log(("iemCImpl_int: DBGFTrap03Handler -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2899 if (rcStrict != VINF_EM_RAW_GUEST_TRAP)
2900 return iemSetPassUpStatus(pVCpu, rcStrict);
2901 }
2902 }
2903 return iemRaiseXcptOrInt(pVCpu,
2904 cbInstr,
2905 u8Int,
2906 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
2907 0,
2908 0);
2909}
2910
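/*
 * The frame popped by a real mode / V8086 IRET, which is why the code below
 * asks iemMemStackPopBeginSpecial for 6 or 12 bytes:
 *
 *      16-bit:  [SP+0] = IP,   [SP+2] = CS,   [SP+4] = FLAGS
 *      32-bit:  [SP+0] = EIP,  [SP+4] = CS,   [SP+8] = EFLAGS
 */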
2911
2912/**
2913 * Implements iret for real mode and V8086 mode.
2914 *
2915 * @param enmEffOpSize The effective operand size.
2916 */
2917IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2918{
2919 X86EFLAGS Efl;
2920 Efl.u = IEMMISC_GET_EFL(pVCpu);
2921 NOREF(cbInstr);
2922
2923 /*
2924 * iret throws an exception if VME isn't enabled.
2925 */
2926 if ( Efl.Bits.u1VM
2927 && Efl.Bits.u2IOPL != 3
2928 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
2929 return iemRaiseGeneralProtectionFault0(pVCpu);
2930
2931 /*
2932 * Do the stack bits, but don't commit RSP before everything checks
2933 * out right.
2934 */
2935 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2936 VBOXSTRICTRC rcStrict;
2937 RTCPTRUNION uFrame;
2938 uint16_t uNewCs;
2939 uint32_t uNewEip;
2940 uint32_t uNewFlags;
2941 uint64_t uNewRsp;
2942 if (enmEffOpSize == IEMMODE_32BIT)
2943 {
2944 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &uNewRsp);
2945 if (rcStrict != VINF_SUCCESS)
2946 return rcStrict;
2947 uNewEip = uFrame.pu32[0];
2948 if (uNewEip > UINT16_MAX)
2949 return iemRaiseGeneralProtectionFault0(pVCpu);
2950
2951 uNewCs = (uint16_t)uFrame.pu32[1];
2952 uNewFlags = uFrame.pu32[2];
2953 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2954 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2955 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2956 | X86_EFL_ID;
2957 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
2958 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2959 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2960 }
2961 else
2962 {
2963 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
2964 if (rcStrict != VINF_SUCCESS)
2965 return rcStrict;
2966 uNewEip = uFrame.pu16[0];
2967 uNewCs = uFrame.pu16[1];
2968 uNewFlags = uFrame.pu16[2];
2969 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2970 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2971 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2972 /** @todo The intel pseudo code does not indicate what happens to
2973 * reserved flags. We just ignore them. */
2974 /* Ancient CPU adjustments: See iemCImpl_popf. */
2975 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
2976 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2977 }
2978 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
2979 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2980 { /* extremely likely */ }
2981 else
2982 return rcStrict;
2983
2984 /** @todo Check how this is supposed to work if sp=0xfffe. */
2985 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2986 uNewCs, uNewEip, uNewFlags, uNewRsp));
2987
2988 /*
2989 * Check the limit of the new EIP.
2990 */
2991 /** @todo Only the AMD pseudo code checks the limit here; what's
2992 * right? */
2993 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
2994 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2995
2996 /*
2997 * V8086 checks and flag adjustments
2998 */
2999 if (Efl.Bits.u1VM)
3000 {
3001 if (Efl.Bits.u2IOPL == 3)
3002 {
3003 /* Preserve IOPL and clear RF. */
3004 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
3005 uNewFlags |= Efl.u & (X86_EFL_IOPL);
3006 }
3007 else if ( enmEffOpSize == IEMMODE_16BIT
3008 && ( !(uNewFlags & X86_EFL_IF)
3009 || !Efl.Bits.u1VIP )
3010 && !(uNewFlags & X86_EFL_TF) )
3011 {
3012 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
3013 uNewFlags &= ~X86_EFL_VIF;
3014 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
3015 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
3016 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
3017 }
3018 else
3019 return iemRaiseGeneralProtectionFault0(pVCpu);
3020 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
3021 }
3022
3023 /*
3024 * Commit the operation.
3025 */
3026#ifdef DBGFTRACE_ENABLED
3027 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
3028 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
3029#endif
3030 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3031 pVCpu->cpum.GstCtx.rip = uNewEip;
3032 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3033 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3034 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3035 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
3036 /** @todo do we load attribs and limit as well? */
3037 Assert(uNewFlags & X86_EFL_1);
3038 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3039
3040 /* Flush the prefetch buffer. */
3041#ifdef IEM_WITH_CODE_TLB
3042 pVCpu->iem.s.pbInstrBuf = NULL;
3043#else
3044 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3045#endif
3046
3047 return VINF_SUCCESS;
3048}
3049
3050
3051/**
3052 * Loads a segment register when entering V8086 mode.
3053 *
3054 * @param pSReg The segment register.
3055 * @param uSeg The segment to load.
3056 */
3057static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
3058{
3059 pSReg->Sel = uSeg;
3060 pSReg->ValidSel = uSeg;
3061 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3062 pSReg->u64Base = (uint32_t)uSeg << 4;
3063 pSReg->u32Limit = 0xffff;
3064 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
3065 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
3066 * IRET'ing to V8086. */
3067}
3068
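/*
 * When a 32-bit IRET executed at CPL 0 finds EFLAGS.VM set in the popped
 * flags, the frame continues beyond EIP/CS/EFLAGS with six more dwords; that
 * is the 24 bytes fetched via iemMemStackPopContinueSpecial below:
 *
 *      [+0] = ESP, [+4] = SS, [+8] = ES, [+12] = DS, [+16] = FS, [+20] = GS
 *
 * Only the low word of each selector slot is used.
 */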
3069
3070/**
3071 * Implements iret for protected mode returning to V8086 mode.
3072 *
3073 * @param uNewEip The new EIP.
3074 * @param uNewCs The new CS.
3075 * @param uNewFlags The new EFLAGS.
3076 * @param uNewRsp The RSP after the initial IRET frame.
3077 *
3078 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
3079 */
3080IEM_CIMPL_DEF_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp)
3081{
3082 RT_NOREF_PV(cbInstr);
3083 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
3084
3085 /*
3086 * Pop the V8086 specific frame bits off the stack.
3087 */
3088 VBOXSTRICTRC rcStrict;
3089 RTCPTRUNION uFrame;
3090 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, uNewRsp);
3091 if (rcStrict != VINF_SUCCESS)
3092 return rcStrict;
3093 uint32_t uNewEsp = uFrame.pu32[0];
3094 uint16_t uNewSs = uFrame.pu32[1];
3095 uint16_t uNewEs = uFrame.pu32[2];
3096 uint16_t uNewDs = uFrame.pu32[3];
3097 uint16_t uNewFs = uFrame.pu32[4];
3098 uint16_t uNewGs = uFrame.pu32[5];
3099 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3100 if (rcStrict != VINF_SUCCESS)
3101 return rcStrict;
3102
3103 /*
3104 * Commit the operation.
3105 */
3106 uNewFlags &= X86_EFL_LIVE_MASK;
3107 uNewFlags |= X86_EFL_RA1_MASK;
3108#ifdef DBGFTRACE_ENABLED
3109 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
3110 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
3111#endif
3112 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3113
3114 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3115 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.cs, uNewCs);
3116 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ss, uNewSs);
3117 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.es, uNewEs);
3118 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ds, uNewDs);
3119 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.fs, uNewFs);
3120 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.gs, uNewGs);
3121 pVCpu->cpum.GstCtx.rip = (uint16_t)uNewEip;
3122 pVCpu->cpum.GstCtx.rsp = uNewEsp; /** @todo check this out! */
3123 pVCpu->iem.s.uCpl = 3;
3124
3125 /* Flush the prefetch buffer. */
3126#ifdef IEM_WITH_CODE_TLB
3127 pVCpu->iem.s.pbInstrBuf = NULL;
3128#else
3129 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3130#endif
3131
3132 return VINF_SUCCESS;
3133}
3134
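/*
 * With EFLAGS.NT set, IRET does not pop an IP/CS/FLAGS frame at all; it
 * reads the previous task link from the first word of the current TSS
 * (offset 0, hence the iemMemFetchSysU16 from tr.u64Base below) and switches
 * back to that task, which must be a busy TSS descriptor in the GDT.
 */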
3135
3136/**
3137 * Implements iret for protected mode returning via a nested task.
3138 *
3139 * @param enmEffOpSize The effective operand size.
3140 */
3141IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3142{
3143 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3144#ifndef IEM_IMPLEMENTS_TASKSWITCH
3145 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3146#else
3147 RT_NOREF_PV(enmEffOpSize);
3148
3149 /*
3150 * Read the segment selector in the link-field of the current TSS.
3151 */
3152 RTSEL uSelRet;
3153 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base);
3154 if (rcStrict != VINF_SUCCESS)
3155 return rcStrict;
3156
3157 /*
3158 * Fetch the returning task's TSS descriptor from the GDT.
3159 */
3160 if (uSelRet & X86_SEL_LDT)
3161 {
3162 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3163 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3164 }
3165
3166 IEMSELDESC TssDesc;
3167 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3168 if (rcStrict != VINF_SUCCESS)
3169 return rcStrict;
3170
3171 if (TssDesc.Legacy.Gate.u1DescType)
3172 {
3173 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3174 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3175 }
3176
3177 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3178 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3179 {
3180 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3181 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3182 }
3183
3184 if (!TssDesc.Legacy.Gate.u1Present)
3185 {
3186 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3187 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3188 }
3189
3190 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
3191 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3192 0 /* uCr2 */, uSelRet, &TssDesc);
3193#endif
3194}
3195
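/*
 * Rough decision tree for protected mode IRET, tying together the helpers
 * above with the code below:
 *
 *      if (EFLAGS.NT)                      -> iemCImpl_iret_prot_NestedTask
 *      pop EIP, CS, EFLAGS                 // 2 or 4 byte elements
 *      if (new EFLAGS.VM && CPL == 0)      -> iemCImpl_iret_prot_v8086
 *      if (CS.RPL == CPL)                  -> return within the same level
 *      else                                -> return to an outer level
 */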
3196
3197/**
3198 * Implements iret for protected mode.
3199 *
3200 * @param enmEffOpSize The effective operand size.
3201 */
3202IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3203{
3204 NOREF(cbInstr);
3205 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3206
3207 /*
3208 * Nested task return.
3209 */
3210 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3211 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3212
3213 /*
3214 * Normal return.
3215 *
3216 * Do the stack bits, but don't commit RSP before everything checks
3217 * out right.
3218 */
3219 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3220 VBOXSTRICTRC rcStrict;
3221 RTCPTRUNION uFrame;
3222 uint16_t uNewCs;
3223 uint32_t uNewEip;
3224 uint32_t uNewFlags;
3225 uint64_t uNewRsp;
3226 if (enmEffOpSize == IEMMODE_32BIT)
3227 {
3228 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &uNewRsp);
3229 if (rcStrict != VINF_SUCCESS)
3230 return rcStrict;
3231 uNewEip = uFrame.pu32[0];
3232 uNewCs = (uint16_t)uFrame.pu32[1];
3233 uNewFlags = uFrame.pu32[2];
3234 }
3235 else
3236 {
3237 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
3238 if (rcStrict != VINF_SUCCESS)
3239 return rcStrict;
3240 uNewEip = uFrame.pu16[0];
3241 uNewCs = uFrame.pu16[1];
3242 uNewFlags = uFrame.pu16[2];
3243 }
3244 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3245 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3246 { /* extremely likely */ }
3247 else
3248 return rcStrict;
3249 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, pVCpu->iem.s.uCpl));
3250
3251 /*
3252 * We're hopefully not returning to V8086 mode...
3253 */
3254 if ( (uNewFlags & X86_EFL_VM)
3255 && pVCpu->iem.s.uCpl == 0)
3256 {
3257 Assert(enmEffOpSize == IEMMODE_32BIT);
3258 return IEM_CIMPL_CALL_4(iemCImpl_iret_prot_v8086, uNewEip, uNewCs, uNewFlags, uNewRsp);
3259 }
3260
3261 /*
3262 * Protected mode.
3263 */
3264 /* Read the CS descriptor. */
3265 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3266 {
3267 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3268 return iemRaiseGeneralProtectionFault0(pVCpu);
3269 }
3270
3271 IEMSELDESC DescCS;
3272 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3273 if (rcStrict != VINF_SUCCESS)
3274 {
3275 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3276 return rcStrict;
3277 }
3278
3279 /* Must be a code descriptor. */
3280 if (!DescCS.Legacy.Gen.u1DescType)
3281 {
3282 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3283 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3284 }
3285 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3286 {
3287 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3288 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3289 }
3290
3291 /* Privilege checks. */
3292 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3293 {
3294 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3295 {
3296 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3297 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3298 }
3299 }
3300 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3301 {
3302 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3303 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3304 }
3305 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3306 {
3307 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pVCpu->iem.s.uCpl));
3308 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3309 }
3310
3311 /* Present? */
3312 if (!DescCS.Legacy.Gen.u1Present)
3313 {
3314 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3315 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3316 }
3317
3318 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3319
3320 /*
3321 * Return to outer level?
3322 */
3323 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
3324 {
3325 uint16_t uNewSS;
3326 uint32_t uNewESP;
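        /* Continue popping the outer-level part of the frame: ESP and SS
           (two dwords for a 32-bit operand size, two words for 16-bit). */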
3327 if (enmEffOpSize == IEMMODE_32BIT)
3328 {
3329 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, uNewRsp);
3330 if (rcStrict != VINF_SUCCESS)
3331 return rcStrict;
3332/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3333 * 16 or 32 bits get loaded into SP turns out to depend on the D/B
3334 * bit of the popped SS selector. */
3335 uNewESP = uFrame.pu32[0];
3336 uNewSS = (uint16_t)uFrame.pu32[1];
3337 }
3338 else
3339 {
3340 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, uNewRsp);
3341 if (rcStrict != VINF_SUCCESS)
3342 return rcStrict;
3343 uNewESP = uFrame.pu16[0];
3344 uNewSS = uFrame.pu16[1];
3345 }
3346 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3347 if (rcStrict != VINF_SUCCESS)
3348 return rcStrict;
3349 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3350
3351 /* Read the SS descriptor. */
3352 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3353 {
3354 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3355 return iemRaiseGeneralProtectionFault0(pVCpu);
3356 }
3357
3358 IEMSELDESC DescSS;
3359 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3360 if (rcStrict != VINF_SUCCESS)
3361 {
3362 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3363 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3364 return rcStrict;
3365 }
3366
3367 /* Privilege checks. */
3368 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3369 {
3370 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3371 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3372 }
3373 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3374 {
3375 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3376 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3377 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3378 }
3379
3380 /* Must be a writeable data segment descriptor. */
3381 if (!DescSS.Legacy.Gen.u1DescType)
3382 {
3383 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3384 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3385 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3386 }
3387 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3388 {
3389 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3390 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3391 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3392 }
3393
3394 /* Present? */
3395 if (!DescSS.Legacy.Gen.u1Present)
3396 {
3397 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3398 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3399 }
3400
3401 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3402
3403 /* Check EIP. */
3404 if (uNewEip > cbLimitCS)
3405 {
3406 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3407 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3408 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3409 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3410 }
3411
3412 /*
3413 * Commit the changes, marking CS and SS accessed first since
3414 * that may fail.
3415 */
3416 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3417 {
3418 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3419 if (rcStrict != VINF_SUCCESS)
3420 return rcStrict;
3421 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3422 }
3423 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3424 {
3425 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3426 if (rcStrict != VINF_SUCCESS)
3427 return rcStrict;
3428 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3429 }
3430
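    /* Figure out which EFLAGS bits IRET may restore from the popped image: the
       arithmetic flags plus TF/DF/NT are always writable; RF/AC/ID require a
       32-bit operand size; IOPL (and the VIF/VIP shadows) may only change at
       CPL 0, and IF only when CPL <= IOPL; pre-386 targets additionally drop
       AC/ID/VIF/VIP.  For instance, a CPL-3 iret under IOPL 0 with a 16-bit
       operand size can alter CF..OF, TF, DF and NT, while the popped IF and
       IOPL bits are simply ignored. */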
3431 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3432 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3433 if (enmEffOpSize != IEMMODE_16BIT)
3434 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3435 if (pVCpu->iem.s.uCpl == 0)
3436 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3437 else if (pVCpu->iem.s.uCpl <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3438 fEFlagsMask |= X86_EFL_IF;
3439 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3440 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3441 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3442 fEFlagsNew &= ~fEFlagsMask;
3443 fEFlagsNew |= uNewFlags & fEFlagsMask;
3444#ifdef DBGFTRACE_ENABLED
3445 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3446 pVCpu->iem.s.uCpl, uNewCs & X86_SEL_RPL, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3447 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3448#endif
3449
3450 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3451 pVCpu->cpum.GstCtx.rip = uNewEip;
3452 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3453 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3454 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3455 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3456 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3457 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3458 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3459
3460 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3461 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3462 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3463 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3464 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3465 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3466 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3467 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewESP;
3468 else
3469 pVCpu->cpum.GstCtx.rsp = uNewESP;
3470
3471 pVCpu->iem.s.uCpl = uNewCs & X86_SEL_RPL;
3472 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
3473 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
3474 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
3475 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
3476
3477 /* Done! */
3478
3479 }
3480 /*
3481 * Return to the same level.
3482 */
3483 else
3484 {
3485 /* Check EIP. */
3486 if (uNewEip > cbLimitCS)
3487 {
3488 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3489 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3490 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3491 }
3492
3493 /*
3494 * Commit the changes, marking CS first since it may fail.
3495 */
3496 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3497 {
3498 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3499 if (rcStrict != VINF_SUCCESS)
3500 return rcStrict;
3501 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3502 }
3503
3504 X86EFLAGS NewEfl;
3505 NewEfl.u = IEMMISC_GET_EFL(pVCpu);
3506 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3507 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3508 if (enmEffOpSize != IEMMODE_16BIT)
3509 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3510 if (pVCpu->iem.s.uCpl == 0)
3511 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3512 else if (pVCpu->iem.s.uCpl <= NewEfl.Bits.u2IOPL)
3513 fEFlagsMask |= X86_EFL_IF;
3514 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3515 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3516 NewEfl.u &= ~fEFlagsMask;
3517 NewEfl.u |= fEFlagsMask & uNewFlags;
3518#ifdef DBGFTRACE_ENABLED
3519 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3520 pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3521 uNewCs, uNewEip, uNewFlags, pVCpu->cpum.GstCtx.ss.Sel, uNewRsp);
3522#endif
3523
3524 IEMMISC_SET_EFL(pVCpu, NewEfl.u);
3525 pVCpu->cpum.GstCtx.rip = uNewEip;
3526 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3527 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3528 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3529 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3530 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3531 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3532 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3533 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3534 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3535 else
3536 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3537 /* Done! */
3538 }
3539
3540 /* Flush the prefetch buffer. */
3541#ifdef IEM_WITH_CODE_TLB
3542 pVCpu->iem.s.pbInstrBuf = NULL;
3543#else
3544 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3545#endif
3546
3547 return VINF_SUCCESS;
3548}
3549
3550
3551/**
3552 * Implements iret for long mode.
3553 *
3554 * @param enmEffOpSize The effective operand size.
3555 */
3556IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3557{
3558 NOREF(cbInstr);
3559
3560 /*
3561 * Nested task return is not supported in long mode.
3562 */
3563 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3564 {
3565 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.eflags.u));
3566 return iemRaiseGeneralProtectionFault0(pVCpu);
3567 }
3568
3569 /*
3570 * Normal return.
3571 *
3572 * Do the stack bits, but don't commit RSP before everything checks
3573 * out right.
3574 */
3575 VBOXSTRICTRC rcStrict;
3576 RTCPTRUNION uFrame;
3577 uint64_t uNewRip;
3578 uint16_t uNewCs;
3579 uint16_t uNewSs;
3580 uint32_t uNewFlags;
3581 uint64_t uNewRsp;
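    /* In long mode the IRET frame always has five slots: RIP, CS, RFLAGS, RSP
       and SS.  Only the slot width varies with the operand size (8, 4 or 2
       bytes), hence the 5*8, 5*4 and 5*2 byte pops below. */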
3582 if (enmEffOpSize == IEMMODE_64BIT)
3583 {
3584 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &uNewRsp);
3585 if (rcStrict != VINF_SUCCESS)
3586 return rcStrict;
3587 uNewRip = uFrame.pu64[0];
3588 uNewCs = (uint16_t)uFrame.pu64[1];
3589 uNewFlags = (uint32_t)uFrame.pu64[2];
3590 uNewRsp = uFrame.pu64[3];
3591 uNewSs = (uint16_t)uFrame.pu64[4];
3592 }
3593 else if (enmEffOpSize == IEMMODE_32BIT)
3594 {
3595 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &uNewRsp);
3596 if (rcStrict != VINF_SUCCESS)
3597 return rcStrict;
3598 uNewRip = uFrame.pu32[0];
3599 uNewCs = (uint16_t)uFrame.pu32[1];
3600 uNewFlags = uFrame.pu32[2];
3601 uNewRsp = uFrame.pu32[3];
3602 uNewSs = (uint16_t)uFrame.pu32[4];
3603 }
3604 else
3605 {
3606 Assert(enmEffOpSize == IEMMODE_16BIT);
3607 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &uNewRsp);
3608 if (rcStrict != VINF_SUCCESS)
3609 return rcStrict;
3610 uNewRip = uFrame.pu16[0];
3611 uNewCs = uFrame.pu16[1];
3612 uNewFlags = uFrame.pu16[2];
3613 uNewRsp = uFrame.pu16[3];
3614 uNewSs = uFrame.pu16[4];
3615 }
3616 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3617 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3618     { /* extremely likely */ }
3619 else
3620 return rcStrict;
3621 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3622
3623 /*
3624 * Check stuff.
3625 */
3626 /* Read the CS descriptor. */
3627 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3628 {
3629 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3630 return iemRaiseGeneralProtectionFault0(pVCpu);
3631 }
3632
3633 IEMSELDESC DescCS;
3634 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3635 if (rcStrict != VINF_SUCCESS)
3636 {
3637 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3638 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3639 return rcStrict;
3640 }
3641
3642 /* Must be a code descriptor. */
3643 if ( !DescCS.Legacy.Gen.u1DescType
3644 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3645 {
3646         Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment DescType=%u Type=%#x -> #GP\n",
3647 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3648 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3649 }
3650
3651 /* Privilege checks. */
3652 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3653 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3654 {
3655 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3656 {
3657 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3658 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3659 }
3660 }
3661 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3662 {
3663 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3664 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3665 }
3666 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3667 {
3668 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
3669 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3670 }
3671
3672 /* Present? */
3673 if (!DescCS.Legacy.Gen.u1Present)
3674 {
3675 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3676 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3677 }
3678
3679 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3680
3681 /* Read the SS descriptor. */
3682 IEMSELDESC DescSS;
3683 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3684 {
3685 if ( !DescCS.Legacy.Gen.u1Long
3686 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3687 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3688 {
3689 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3690 return iemRaiseGeneralProtectionFault0(pVCpu);
3691 }
3692 DescSS.Legacy.u = 0;
3693 }
3694 else
3695 {
3696 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3697 if (rcStrict != VINF_SUCCESS)
3698 {
3699 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3700 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3701 return rcStrict;
3702 }
3703 }
3704
3705 /* Privilege checks. */
3706 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3707 {
3708 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3709 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3710 }
3711
3712 uint32_t cbLimitSs;
3713 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3714 cbLimitSs = UINT32_MAX;
3715 else
3716 {
3717 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3718 {
3719 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3720 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3721 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3722 }
3723
3724 /* Must be a writeable data segment descriptor. */
3725 if (!DescSS.Legacy.Gen.u1DescType)
3726 {
3727 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3728 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3729 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3730 }
3731 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3732 {
3733 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3734 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3735 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3736 }
3737
3738 /* Present? */
3739 if (!DescSS.Legacy.Gen.u1Present)
3740 {
3741 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3742 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3743 }
3744 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3745 }
3746
3747 /* Check EIP. */
3748 if (DescCS.Legacy.Gen.u1Long)
3749 {
3750 if (!IEM_IS_CANONICAL(uNewRip))
3751 {
3752 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3753 uNewCs, uNewRip, uNewSs, uNewRsp));
3754 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3755 }
3756 }
3757 else
3758 {
3759 if (uNewRip > cbLimitCS)
3760 {
3761 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3762 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3763 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3764 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3765 }
3766 }
3767
3768 /*
3769 * Commit the changes, marking CS and SS accessed first since
3770 * that may fail.
3771 */
3772 /** @todo where exactly are these actually marked accessed by a real CPU? */
3773 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3774 {
3775 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3776 if (rcStrict != VINF_SUCCESS)
3777 return rcStrict;
3778 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3779 }
3780 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3781 {
3782 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3783 if (rcStrict != VINF_SUCCESS)
3784 return rcStrict;
3785 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3786 }
3787
3788 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3789 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3790 if (enmEffOpSize != IEMMODE_16BIT)
3791 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3792 if (pVCpu->iem.s.uCpl == 0)
3793 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3794 else if (pVCpu->iem.s.uCpl <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3795 fEFlagsMask |= X86_EFL_IF;
3796 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3797 fEFlagsNew &= ~fEFlagsMask;
3798 fEFlagsNew |= uNewFlags & fEFlagsMask;
3799#ifdef DBGFTRACE_ENABLED
3800 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3801 pVCpu->iem.s.uCpl, uNewCpl, pVCpu->cpum.GstCtx.rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3802#endif
3803
3804 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3805 pVCpu->cpum.GstCtx.rip = uNewRip;
3806 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3807 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3808 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3809 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3810 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3811 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3812 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3813 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
3814 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3815 else
3816 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3817 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
3818 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
3819 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3820 {
3821 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3822 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3823 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3824 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3825 Log2(("iretq new SS: NULL\n"));
3826 }
3827 else
3828 {
3829 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3830 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3831 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3832 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3833 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
3834 }
3835
3836 if (pVCpu->iem.s.uCpl != uNewCpl)
3837 {
3838 pVCpu->iem.s.uCpl = uNewCpl;
3839 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.ds);
3840 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.es);
3841 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.fs);
3842 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.gs);
3843 }
3844
3845 /* Flush the prefetch buffer. */
3846#ifdef IEM_WITH_CODE_TLB
3847 pVCpu->iem.s.pbInstrBuf = NULL;
3848#else
3849 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3850#endif
3851
3852 return VINF_SUCCESS;
3853}
3854
3855
3856/**
3857 * Implements iret.
3858 *
3859 * @param enmEffOpSize The effective operand size.
3860 */
3861IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3862{
3863 bool fBlockingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3864
3865#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3866 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3867 {
3868 /*
3869 * Record whether NMI (or virtual-NMI) blocking is in effect during the execution
3870 * of this IRET instruction. We need to provide this information as part of some
3871 * VM-exits.
3872 *
3873 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3874 */
3875 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_VIRT_NMI))
3876 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking;
3877 else
3878 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = fBlockingNmi;
3879
3880 /*
3881 * If "NMI exiting" is set, IRET does not affect blocking of NMIs.
3882 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3883 */
3884 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_NMI_EXIT))
3885 fBlockingNmi = false;
3886
3887 /* Clear virtual-NMI blocking, if any, before causing any further exceptions. */
3888 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
3889 }
3890#endif
3891
3892 /*
3893     * The SVM nested-guest intercept for IRET takes priority over all exceptions.
3894 * The NMI is still held pending (which I assume means blocking of further NMIs
3895 * is in effect).
3896 *
3897 * See AMD spec. 15.9 "Instruction Intercepts".
3898 * See AMD spec. 15.21.9 "NMI Support".
3899 */
3900 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3901 {
3902 Log(("iret: Guest intercept -> #VMEXIT\n"));
3903 IEM_SVM_UPDATE_NRIP(pVCpu);
3904 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3905 }
3906
3907 /*
3908 * Clear NMI blocking, if any, before causing any further exceptions.
3909 * See Intel spec. 6.7.1 "Handling Multiple NMIs".
3910 */
3911 if (fBlockingNmi)
3912 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3913
3914 /*
3915 * Call a mode specific worker.
3916 */
3917 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3918 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3919 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3920 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
3921 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3922 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3923}
3924
3925
3926static void iemLoadallSetSelector(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
3927{
3928 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
3929
3930 pHid->Sel = uSel;
3931 pHid->ValidSel = uSel;
3932 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3933}
3934
3935
3936static void iemLoadall286SetDescCache(PVMCPUCC pVCpu, uint8_t iSegReg, uint8_t const *pbMem)
3937{
3938 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
3939
3940 /* The base is in the first three bytes. */
3941 pHid->u64Base = pbMem[0] + (pbMem[1] << 8) + (pbMem[2] << 16);
3942 /* The attributes are in the fourth byte. */
3943 pHid->Attr.u = pbMem[3];
3944 /* The limit is in the last two bytes. */
3945 pHid->u32Limit = pbMem[4] + (pbMem[5] << 8);
3946}
3947
3948
3949/**
3950 * Implements 286 LOADALL (286 CPUs only).
3951 */
3952IEM_CIMPL_DEF_0(iemCImpl_loadall286)
3953{
3954 NOREF(cbInstr);
3955
3956 /* Data is loaded from a buffer at 800h. No checks are done on the
3957      * validity of the loaded state.
3958 *
3959 * LOADALL only loads the internal CPU state, it does not access any
3960 * GDT, LDT, or similar tables.
3961 */
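    /* Rough map of the 0x66 byte buffer as consumed below (offsets from 0x800):
       0x06 MSW, 0x16 TR, 0x18 FLAGS, 0x1A IP, 0x1C LDTR, 0x1E DS/SS/CS/ES
       selectors, 0x26 DI/SI/BP/SP/BX/DX/CX/AX, 0x36 ES/CS/SS/DS descriptor
       caches (6 bytes each), 0x4E GDTR, 0x5A IDTR. */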
3962
3963 if (pVCpu->iem.s.uCpl != 0)
3964 {
3965 Log(("loadall286: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
3966 return iemRaiseGeneralProtectionFault0(pVCpu);
3967 }
3968
3969 uint8_t const *pbMem = NULL;
3970 uint16_t const *pa16Mem;
3971 uint8_t const *pa8Mem;
3972 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */
3973 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0);
3974 if (rcStrict != VINF_SUCCESS)
3975 return rcStrict;
3976
3977 /* The MSW is at offset 0x06. */
3978 pa16Mem = (uint16_t const *)(pbMem + 0x06);
3979 /* Even LOADALL can't clear the MSW.PE bit, though it can set it. */
3980 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3981 uNewCr0 |= *pa16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3982 uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
3983
3984 CPUMSetGuestCR0(pVCpu, uNewCr0);
3985 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCr0);
3986
3987 /* Inform PGM if mode changed. */
3988 if ((uNewCr0 & X86_CR0_PE) != (uOldCr0 & X86_CR0_PE))
3989 {
3990 int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
3991 AssertRCReturn(rc, rc);
3992 /* ignore informational status codes */
3993 }
3994 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
3995 false /* fForce */);
3996
3997 /* TR selector is at offset 0x16. */
3998 pa16Mem = (uint16_t const *)(pbMem + 0x16);
3999 pVCpu->cpum.GstCtx.tr.Sel = pa16Mem[0];
4000 pVCpu->cpum.GstCtx.tr.ValidSel = pa16Mem[0];
4001 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4002
4003 /* Followed by FLAGS... */
4004 pVCpu->cpum.GstCtx.eflags.u = pa16Mem[1] | X86_EFL_1;
4005 pVCpu->cpum.GstCtx.ip = pa16Mem[2]; /* ...and IP. */
4006
4007 /* LDT is at offset 0x1C. */
4008 pa16Mem = (uint16_t const *)(pbMem + 0x1C);
4009 pVCpu->cpum.GstCtx.ldtr.Sel = pa16Mem[0];
4010 pVCpu->cpum.GstCtx.ldtr.ValidSel = pa16Mem[0];
4011 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4012
4013 /* Segment registers are at offset 0x1E. */
4014 pa16Mem = (uint16_t const *)(pbMem + 0x1E);
4015 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pa16Mem[0]);
4016 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pa16Mem[1]);
4017 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pa16Mem[2]);
4018 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pa16Mem[3]);
4019
4020 /* GPRs are at offset 0x26. */
4021 pa16Mem = (uint16_t const *)(pbMem + 0x26);
4022 pVCpu->cpum.GstCtx.di = pa16Mem[0];
4023 pVCpu->cpum.GstCtx.si = pa16Mem[1];
4024 pVCpu->cpum.GstCtx.bp = pa16Mem[2];
4025 pVCpu->cpum.GstCtx.sp = pa16Mem[3];
4026 pVCpu->cpum.GstCtx.bx = pa16Mem[4];
4027 pVCpu->cpum.GstCtx.dx = pa16Mem[5];
4028 pVCpu->cpum.GstCtx.cx = pa16Mem[6];
4029 pVCpu->cpum.GstCtx.ax = pa16Mem[7];
4030
4031 /* Descriptor caches are at offset 0x36, 6 bytes per entry. */
4032 iemLoadall286SetDescCache(pVCpu, X86_SREG_ES, pbMem + 0x36);
4033 iemLoadall286SetDescCache(pVCpu, X86_SREG_CS, pbMem + 0x3C);
4034 iemLoadall286SetDescCache(pVCpu, X86_SREG_SS, pbMem + 0x42);
4035 iemLoadall286SetDescCache(pVCpu, X86_SREG_DS, pbMem + 0x48);
4036
4037 /* GDTR contents are at offset 0x4E, 6 bytes. */
4038 RTGCPHYS GCPtrBase;
4039 uint16_t cbLimit;
4040 pa8Mem = pbMem + 0x4E;
4041 /* NB: Fourth byte "should be zero"; we are ignoring it. */
4042 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
4043 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
4044 CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
4045
4046 /* IDTR contents are at offset 0x5A, 6 bytes. */
4047 pa8Mem = pbMem + 0x5A;
4048 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
4049 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
4050 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
4051
4052 Log(("LOADALL: GDTR:%08RX64/%04X, IDTR:%08RX64/%04X\n", pVCpu->cpum.GstCtx.gdtr.pGdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.idtr.pIdt, pVCpu->cpum.GstCtx.idtr.cbIdt));
4053 Log(("LOADALL: CS:%04X, CS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.cs.u64Base, pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.Attr.u));
4054 Log(("LOADALL: DS:%04X, DS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.ds.u64Base, pVCpu->cpum.GstCtx.ds.u32Limit, pVCpu->cpum.GstCtx.ds.Attr.u));
4055 Log(("LOADALL: ES:%04X, ES base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.es.Sel, pVCpu->cpum.GstCtx.es.u64Base, pVCpu->cpum.GstCtx.es.u32Limit, pVCpu->cpum.GstCtx.es.Attr.u));
4056 Log(("LOADALL: SS:%04X, SS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
4057     Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.ax, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx));
4058
4059 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pbMem, IEM_ACCESS_SYS_R);
4060 if (rcStrict != VINF_SUCCESS)
4061 return rcStrict;
4062
4063 /* The CPL may change. It is taken from the "DPL fields of the SS and CS
4064 * descriptor caches" but there is no word as to what happens if those are
4065 * not identical (probably bad things).
4066 */
4067 pVCpu->iem.s.uCpl = pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl;
4068
4069 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS | CPUM_CHANGED_IDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_TR | CPUM_CHANGED_LDTR);
4070
4071 /* Flush the prefetch buffer. */
4072#ifdef IEM_WITH_CODE_TLB
4073 pVCpu->iem.s.pbInstrBuf = NULL;
4074#else
4075 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4076#endif
4077 return rcStrict;
4078}
4079
4080
4081/**
4082 * Implements SYSCALL (AMD and Intel64).
4083 */
4084IEM_CIMPL_DEF_0(iemCImpl_syscall)
4085{
4086 /** @todo hack, LOADALL should be decoded as such on a 286. */
4087 if (RT_UNLIKELY(pVCpu->iem.s.uTargetCpu == IEMTARGETCPU_286))
4088 return iemCImpl_loadall286(pVCpu, cbInstr);
4089
4090 /*
4091 * Check preconditions.
4092 *
4093     * Note that CPUs described in the documentation may load a few odder values
4094 * into CS and SS than we allow here. This has yet to be checked on real
4095 * hardware.
4096 */
4097 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4098 {
4099 Log(("syscall: Not enabled in EFER -> #UD\n"));
4100 return iemRaiseUndefinedOpcode(pVCpu);
4101 }
4102 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4103 {
4104 Log(("syscall: Protected mode is required -> #GP(0)\n"));
4105 return iemRaiseGeneralProtectionFault0(pVCpu);
4106 }
4107 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4108 {
4109 Log(("syscall: Only available in long mode on intel -> #UD\n"));
4110 return iemRaiseUndefinedOpcode(pVCpu);
4111 }
4112
4113 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4114
4115 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
4116 /** @todo what about LDT selectors? Shouldn't matter, really. */
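    /* The target selectors come straight out of MSR_STAR: CS is STAR[47:32]
       with the RPL bits masked off, and SS is assumed to be the very next GDT
       entry, i.e. CS + 8.  For instance, with STAR[47:32] = 0x0008 the kernel
       gets CS = 0x0008 and SS = 0x0010. */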
4117 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4118 uint16_t uNewSs = uNewCs + 8;
4119 if (uNewCs == 0 || uNewSs == 0)
4120 {
4121 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4122 return iemRaiseGeneralProtectionFault0(pVCpu);
4123 }
4124
4125     /* Long mode and legacy mode differ. */
4126 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4127 {
4128         uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.msrLSTAR : pVCpu->cpum.GstCtx.msrCSTAR;
4129
4130 /* This test isn't in the docs, but I'm not trusting the guys writing
4131 the MSRs to have validated the values as canonical like they should. */
4132 if (!IEM_IS_CANONICAL(uNewRip))
4133 {
4134             Log(("syscall: New RIP %#RX64 is not canonical -> #UD\n", uNewRip));
4135 return iemRaiseUndefinedOpcode(pVCpu);
4136 }
4137
4138 /*
4139 * Commit it.
4140 */
4141 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, uNewRip));
4142 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.rip + cbInstr;
4143 pVCpu->cpum.GstCtx.rip = uNewRip;
4144
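        /* Save the return RFLAGS (with RF already cleared) in R11, then apply
           the MSR_SFMASK clear-mask to the live RFLAGS; bit 1 stays set as always. */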
4145 pVCpu->cpum.GstCtx.rflags.u &= ~X86_EFL_RF;
4146 pVCpu->cpum.GstCtx.r11 = pVCpu->cpum.GstCtx.rflags.u;
4147 pVCpu->cpum.GstCtx.rflags.u &= ~pVCpu->cpum.GstCtx.msrSFMASK;
4148 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_1;
4149
4150 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4151 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4152 }
4153 else
4154 {
4155 /*
4156 * Commit it.
4157 */
4158 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
4159 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, (uint32_t)(pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
4160 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.eip + cbInstr;
4161 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
4162 pVCpu->cpum.GstCtx.rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
4163
4164 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4165 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4166 }
4167 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
4168 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
4169 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4170 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4171 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4172
4173 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
4174 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
4175 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4176 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4177 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4178
4179 /* Flush the prefetch buffer. */
4180#ifdef IEM_WITH_CODE_TLB
4181 pVCpu->iem.s.pbInstrBuf = NULL;
4182#else
4183 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4184#endif
4185
4186 return VINF_SUCCESS;
4187}
4188
4189
4190/**
4191 * Implements SYSRET (AMD and Intel64).
4192 */
4193IEM_CIMPL_DEF_0(iemCImpl_sysret)
4194
4195{
4196 RT_NOREF_PV(cbInstr);
4197
4198 /*
4199 * Check preconditions.
4200 *
4201     * Note that CPUs described in the documentation may load a few odder values
4202 * into CS and SS than we allow here. This has yet to be checked on real
4203 * hardware.
4204 */
4205 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4206 {
4207 Log(("sysret: Not enabled in EFER -> #UD\n"));
4208 return iemRaiseUndefinedOpcode(pVCpu);
4209 }
4210 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4211 {
4212 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4213 return iemRaiseUndefinedOpcode(pVCpu);
4214 }
4215 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4216 {
4217 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4218 return iemRaiseGeneralProtectionFault0(pVCpu);
4219 }
4220 if (pVCpu->iem.s.uCpl != 0)
4221 {
4222 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
4223 return iemRaiseGeneralProtectionFault0(pVCpu);
4224 }
4225
4226 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4227
4228 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
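    /* The return selectors are likewise derived from MSR_STAR: the base is
       STAR[63:48] with the RPL bits masked off, SS is base + 8, and a 64-bit
       (REX.W) SYSRET bumps CS to base + 16, presumably so that a 32-bit user
       CS, the user SS/data entry and the 64-bit user CS can occupy three
       consecutive GDT slots. */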
4229 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4230 uint16_t uNewSs = uNewCs + 8;
4231 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4232 uNewCs += 16;
4233 if (uNewCs == 0 || uNewSs == 0)
4234 {
4235 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4236 return iemRaiseGeneralProtectionFault0(pVCpu);
4237 }
4238
4239 /*
4240 * Commit it.
4241 */
4242 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4243 {
4244 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4245 {
4246 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
4247 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.r11));
4248         /* Note! We disregard the intel manual regarding the RCX canonical
4249            check; ask intel+xen why AMD doesn't do it. */
4250 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4251 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4252 | (3 << X86DESCATTR_DPL_SHIFT);
4253 }
4254 else
4255 {
4256 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
4257 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.r11));
4258 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.ecx;
4259 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4260 | (3 << X86DESCATTR_DPL_SHIFT);
4261 }
4262 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4263      * what it really ignores. RF and VM are hinted at being zero by AMD. */
4264 pVCpu->cpum.GstCtx.rflags.u = pVCpu->cpum.GstCtx.r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4265 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_1;
4266 }
4267 else
4268 {
4269 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx));
4270 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4271 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_IF;
4272 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4273 | (3 << X86DESCATTR_DPL_SHIFT);
4274 }
4275 pVCpu->cpum.GstCtx.cs.Sel = uNewCs | 3;
4276 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs | 3;
4277 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4278 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4279 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4280
4281 pVCpu->cpum.GstCtx.ss.Sel = uNewSs | 3;
4282 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs | 3;
4283 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4284     /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
4285 pVCpu->cpum.GstCtx.ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4286 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4287 * on sysret. */
4288
4289 /* Flush the prefetch buffer. */
4290#ifdef IEM_WITH_CODE_TLB
4291 pVCpu->iem.s.pbInstrBuf = NULL;
4292#else
4293 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4294#endif
4295
4296 return VINF_SUCCESS;
4297}
4298
4299
4300/**
4301 * Implements SYSENTER (Intel, 32-bit AMD).
4302 */
4303IEM_CIMPL_DEF_0(iemCImpl_sysenter)
4304{
4305 RT_NOREF(cbInstr);
4306
4307 /*
4308 * Check preconditions.
4309 *
4310     * Note that CPUs described in the documentation may load a few odder values
4311 * into CS and SS than we allow here. This has yet to be checked on real
4312 * hardware.
4313 */
4314 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4315 {
4316         Log(("sysenter: not supported -> #UD\n"));
4317 return iemRaiseUndefinedOpcode(pVCpu);
4318 }
4319 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4320 {
4321 Log(("sysenter: Protected or long mode is required -> #GP(0)\n"));
4322 return iemRaiseGeneralProtectionFault0(pVCpu);
4323 }
4324 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4325 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4326 {
4327 Log(("sysenter: Only available in protected mode on AMD -> #UD\n"));
4328 return iemRaiseUndefinedOpcode(pVCpu);
4329 }
4330 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4331 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4332 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4333 {
4334 Log(("sysenter: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4335 return iemRaiseGeneralProtectionFault0(pVCpu);
4336 }
4337
4338 /* This test isn't in the docs, it's just a safeguard against missing
4339 canonical checks when writing the registers. */
4340 if (RT_LIKELY( !fIsLongMode
4341 || ( IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.eip)
4342 && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.esp))))
4343 { /* likely */ }
4344 else
4345 {
4346         Log(("sysenter: SYSENTER_EIP = %#RX64 and/or SYSENTER_ESP = %#RX64 not canonical -> #UD\n",
4347 pVCpu->cpum.GstCtx.SysEnter.eip, pVCpu->cpum.GstCtx.SysEnter.esp));
4348 return iemRaiseUndefinedOpcode(pVCpu);
4349 }
4350
4351/** @todo Test: Sysenter from ring-0, ring-1 and ring-2. */
4352
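    /* Everything is taken from the SYSENTER MSRs: CS = SYSENTER_CS with the
       RPL bits cleared, SS = CS + 8, and RIP/RSP come from SYSENTER_EIP and
       SYSENTER_ESP (truncated to 32 bits outside long mode).  CPL drops to 0. */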
4353 /*
4354 * Update registers and commit.
4355 */
4356 if (fIsLongMode)
4357 {
4358 Log(("sysenter: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip,
4359 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, pVCpu->cpum.GstCtx.SysEnter.eip));
4360 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.SysEnter.eip;
4361 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.SysEnter.esp;
4362 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4363 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4364 }
4365 else
4366 {
4367 Log(("sysenter: %04x:%08RX32 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs, (uint32_t)pVCpu->cpum.GstCtx.rip,
4368 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip));
4369 pVCpu->cpum.GstCtx.rip = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip;
4370 pVCpu->cpum.GstCtx.rsp = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.esp;
4371 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4372 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4373 }
4374 pVCpu->cpum.GstCtx.cs.Sel = uNewCs & X86_SEL_MASK_OFF_RPL;
4375 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs & X86_SEL_MASK_OFF_RPL;
4376 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4377 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4378 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4379
4380 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4381 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4382 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4383 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4384 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4385 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC;
4386 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4387
4388 pVCpu->cpum.GstCtx.rflags.Bits.u1IF = 0;
4389 pVCpu->cpum.GstCtx.rflags.Bits.u1VM = 0;
4390 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4391
4392 pVCpu->iem.s.uCpl = 0;
4393
4394 /* Flush the prefetch buffer. */
4395#ifdef IEM_WITH_CODE_TLB
4396 pVCpu->iem.s.pbInstrBuf = NULL;
4397#else
4398 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4399#endif
4400
4401 return VINF_SUCCESS;
4402}
4403
4404
4405/**
4406 * Implements SYSEXIT (Intel, 32-bit AMD).
4407 *
4408 * @param enmEffOpSize The effective operand size.
4409 */
4410IEM_CIMPL_DEF_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize)
4411{
4412 RT_NOREF(cbInstr);
4413
4414 /*
4415 * Check preconditions.
4416 *
4417     * Note that CPUs described in the documentation may load a few odder values
4418 * into CS and SS than we allow here. This has yet to be checked on real
4419 * hardware.
4420 */
4421 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4422 {
4423         Log(("sysexit: not supported -> #UD\n"));
4424 return iemRaiseUndefinedOpcode(pVCpu);
4425 }
4426 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4427 {
4428 Log(("sysexit: Protected or long mode is required -> #GP(0)\n"));
4429 return iemRaiseGeneralProtectionFault0(pVCpu);
4430 }
4431 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4432 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4433 {
4434 Log(("sysexit: Only available in protected mode on AMD -> #UD\n"));
4435 return iemRaiseUndefinedOpcode(pVCpu);
4436 }
4437 if (pVCpu->iem.s.uCpl != 0)
4438 {
4439 Log(("sysexit: CPL(=%u) != 0 -> #GP(0)\n", pVCpu->iem.s.uCpl));
4440 return iemRaiseGeneralProtectionFault0(pVCpu);
4441 }
4442 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4443 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4444 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4445 {
4446 Log(("sysexit: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4447 return iemRaiseGeneralProtectionFault0(pVCpu);
4448 }
4449
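    /* The ring-3 selectors are fixed offsets from SYSENTER_CS: with a 64-bit
       operand size CS = (SYSENTER_CS | 3) + 32 and SS = +40, otherwise
       CS = +16 and SS = +24; RIP comes from RDX/EDX and RSP from RCX/ECX. */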
4450 /*
4451 * Update registers and commit.
4452 */
4453 if (enmEffOpSize == IEMMODE_64BIT)
4454 {
4455 Log(("sysexit: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip,
4456 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 32, pVCpu->cpum.GstCtx.rcx));
4457 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rdx;
4458 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.rcx;
4459 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4460 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4461 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 32;
4462 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 32;
4463 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 40;
4464 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 40;
4465 }
4466 else
4467 {
4468 Log(("sysexit: %04x:%08RX64 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip,
4469 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 16, (uint32_t)pVCpu->cpum.GstCtx.edx));
4470 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.edx;
4471 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.ecx;
4472 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4473 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4474 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 16;
4475 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 16;
4476 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 24;
4477 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 24;
4478 }
4479 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4480 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4481 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4482
4483 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4484 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4485 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4486 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4487 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4488 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4489
4490 pVCpu->iem.s.uCpl = 3;
4491
4492 /* Flush the prefetch buffer. */
4493#ifdef IEM_WITH_CODE_TLB
4494 pVCpu->iem.s.pbInstrBuf = NULL;
4495#else
4496 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4497#endif
4498
4499 return VINF_SUCCESS;
4500}
4501
4502
4503/**
4504 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4505 *
4506 * @param iSegReg The segment register number (valid).
4507 * @param uSel The new selector value.
4508 */
4509IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
4510{
4511 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4512 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4513 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4514
4515 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4516
4517 /*
4518 * Real mode and V8086 mode are easy.
4519 */
4520 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4521 {
4522 *pSel = uSel;
4523 pHid->u64Base = (uint32_t)uSel << 4;
4524 pHid->ValidSel = uSel;
4525 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4526#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4527 /** @todo Does the CPU actually load limits and attributes in the
4528 * real/V8086 mode segment load case? It doesn't for CS in far
4529 * jumps... Affects unreal mode. */
4530 pHid->u32Limit = 0xffff;
4531 pHid->Attr.u = 0;
4532 pHid->Attr.n.u1Present = 1;
4533 pHid->Attr.n.u1DescType = 1;
4534 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4535 ? X86_SEL_TYPE_RW
4536 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4537#endif
4538 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4539 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4540 return VINF_SUCCESS;
4541 }
4542
4543 /*
4544 * Protected mode.
4545 *
4546 * Check if it's a null segment selector value first, that's OK for DS, ES,
4547 * FS and GS. If not null, then we have to load and parse the descriptor.
4548 */
4549 if (!(uSel & X86_SEL_MASK_OFF_RPL))
4550 {
4551 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4552 if (iSegReg == X86_SREG_SS)
4553 {
4554 /* In 64-bit kernel mode, the stack can be 0 because of the way
4555                interrupts are dispatched. AMD seems to have a slightly more
4556 relaxed relationship to SS.RPL than intel does. */
4557 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4558 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4559 || pVCpu->iem.s.uCpl > 2
4560 || ( uSel != pVCpu->iem.s.uCpl
4561 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4562 {
4563 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4564 return iemRaiseGeneralProtectionFault0(pVCpu);
4565 }
4566 }
4567
4568 *pSel = uSel; /* Not RPL, remember :-) */
4569 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4570 if (iSegReg == X86_SREG_SS)
4571 pHid->Attr.u |= pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT;
4572
4573 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4574 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4575
4576 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4577 return VINF_SUCCESS;
4578 }
4579
4580 /* Fetch the descriptor. */
4581 IEMSELDESC Desc;
4582 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4583 if (rcStrict != VINF_SUCCESS)
4584 return rcStrict;
4585
4586 /* Check GPs first. */
4587 if (!Desc.Legacy.Gen.u1DescType)
4588 {
4589 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4590 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4591 }
4592 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4593 {
4594 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4595 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4596 {
4597 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4598 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4599 }
4600 if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
4601 {
4602 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl));
4603 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4604 }
4605 if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
4606 {
4607 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4608 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4609 }
4610 }
4611 else
4612 {
4613 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4614 {
4615 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4616 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4617 }
4618 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4619 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4620 {
4621#if 0 /* this is what intel says. */
4622 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4623 && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4624 {
4625 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4626 iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4627 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4628 }
4629#else /* this is what makes more sense. */
4630 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4631 {
4632 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4633 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4634 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4635 }
4636 if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4637 {
4638 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4639 iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4640 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4641 }
4642#endif
4643 }
4644 }
4645
4646 /* Is it there? */
4647 if (!Desc.Legacy.Gen.u1Present)
4648 {
4649 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4650 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4651 }
4652
4653 /* The base and limit. */
4654 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4655 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4656
4657 /*
4658 * Ok, everything checked out fine. Now set the accessed bit before
4659 * committing the result into the registers.
4660 */
4661 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4662 {
4663 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4664 if (rcStrict != VINF_SUCCESS)
4665 return rcStrict;
4666 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4667 }
4668
4669 /* commit */
4670 *pSel = uSel;
4671 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4672 pHid->u32Limit = cbLimit;
4673 pHid->u64Base = u64Base;
4674 pHid->ValidSel = uSel;
4675 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4676
4677 /** @todo check if the hidden bits are loaded correctly for 64-bit
4678 * mode. */
4679 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4680
4681 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4682 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4683 return VINF_SUCCESS;
4684}
4685
4686
4687/**
4688 * Implements 'mov SReg, r/m'.
4689 *
4690 * @param iSegReg The segment register number (valid).
4691 * @param uSel The new selector value.
4692 */
4693IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4694{
4695 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4696 if (rcStrict == VINF_SUCCESS)
4697 {
4698 if (iSegReg == X86_SREG_SS)
4699 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
4700 }
4701 return rcStrict;
4702}
4703
4704
4705/**
4706 * Implements 'pop SReg'.
4707 *
4708 * @param iSegReg The segment register number (valid).
4709 * @param enmEffOpSize The effective operand size (valid).
4710 */
4711IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4712{
4713 VBOXSTRICTRC rcStrict;
4714
4715 /*
4716 * Read the selector off the stack and join paths with mov ss, reg.
4717 */
4718 RTUINT64U TmpRsp;
4719 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
4720 switch (enmEffOpSize)
4721 {
4722 case IEMMODE_16BIT:
4723 {
4724 uint16_t uSel;
4725 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4726 if (rcStrict == VINF_SUCCESS)
4727 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4728 break;
4729 }
4730
4731 case IEMMODE_32BIT:
4732 {
4733 uint32_t u32Value;
4734 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4735 if (rcStrict == VINF_SUCCESS)
4736 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4737 break;
4738 }
4739
4740 case IEMMODE_64BIT:
4741 {
4742 uint64_t u64Value;
4743 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4744 if (rcStrict == VINF_SUCCESS)
4745 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4746 break;
4747 }
4748 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4749 }
4750
4751 /*
4752 * Commit the stack on success.
4753 */
4754 if (rcStrict == VINF_SUCCESS)
4755 {
4756 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
4757 if (iSegReg == X86_SREG_SS)
4758 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
4759 }
4760 return rcStrict;
4761}
4762
4763
4764/**
4765 * Implements lgs, lfs, les, lds & lss.
4766 */
4767IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize)
4768{
4769 /*
4770 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4771 */
4772 /** @todo verify and test that mov, pop and lXs handle the segment
4773 * register loading in the exact same way. */
4774 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4775 if (rcStrict == VINF_SUCCESS)
4776 {
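            /* Note: the 32-bit case below stores a full uint64_t, presumably to
               mimic the implicit zero-extension of 32-bit GPR writes in 64-bit
               mode; outside 64-bit mode the upper register half is ignored anyway. */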
4777 switch (enmEffOpSize)
4778 {
4779 case IEMMODE_16BIT:
4780 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4781 break;
4782 case IEMMODE_32BIT:
4783 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4784 break;
4785 case IEMMODE_64BIT:
4786 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4787 break;
4788 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4789 }
4790 }
4791
4792 return rcStrict;
4793}
4794
4795
4796/**
4797 * Helper for VERR, VERW, LAR, and LSL that fetches the descriptor from the GDT or LDT.
4798 *
4799 * @retval VINF_SUCCESS on success.
4800 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4801 * @retval iemMemFetchSysU64 return value.
4802 *
4803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4804 * @param uSel The selector value.
4805 * @param fAllowSysDesc Whether system descriptors are OK or not.
4806 * @param pDesc Where to return the descriptor on success.
4807 */
4808static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPUCC pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4809{
4810 pDesc->Long.au64[0] = 0;
4811 pDesc->Long.au64[1] = 0;
4812
4813 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4814 return VINF_IEM_SELECTOR_NOT_OK;
4815
4816 /* Within the table limits? */
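    /* X86_SEL_RPL_LDT is 7, so OR-ing it into the selector gives the offset of
       the last byte of the 8-byte descriptor; the limit comparisons below thus
       check that the whole descriptor lies inside the table. */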
4817 RTGCPTR GCPtrBase;
4818 if (uSel & X86_SEL_LDT)
4819 {
4820 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
4821 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
4822 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
4823 return VINF_IEM_SELECTOR_NOT_OK;
4824 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
4825 }
4826 else
4827 {
4828 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
4829 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
4830 return VINF_IEM_SELECTOR_NOT_OK;
4831 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
4832 }
4833
4834 /* Fetch the descriptor. */
4835 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4836 if (rcStrict != VINF_SUCCESS)
4837 return rcStrict;
4838 if (!pDesc->Legacy.Gen.u1DescType)
4839 {
4840 if (!fAllowSysDesc)
4841 return VINF_IEM_SELECTOR_NOT_OK;
4842 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4843 {
4844 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4845 if (rcStrict != VINF_SUCCESS)
4846 return rcStrict;
4847 }
4848
4849 }
4850
4851 return VINF_SUCCESS;
4852}
4853
4854
4855/**
4856 * Implements verr (fWrite = false) and verw (fWrite = true).
4857 */
4858IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4859{
4860 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4861
4862 /** @todo figure whether the accessed bit is set or not. */
4863
4864 bool fAccessible = true;
4865 IEMSELDESC Desc;
4866 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4867 if (rcStrict == VINF_SUCCESS)
4868 {
4869 /* Check the descriptor, order doesn't matter much here. */
4870 if ( !Desc.Legacy.Gen.u1DescType
4871 || !Desc.Legacy.Gen.u1Present)
4872 fAccessible = false;
4873 else
4874 {
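            /* VERW: only a writable data segment passes (code segments are never
               writable).  VERR: data segments are always readable, code segments
               only when their read bit is set. */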
4875 if ( fWrite
4876 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4877 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4878 fAccessible = false;
4879
4880 /** @todo testcase for the conforming behavior. */
4881 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4882 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4883 {
4884 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4885 fAccessible = false;
4886 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4887 fAccessible = false;
4888 }
4889 }
4890
4891 }
4892 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4893 fAccessible = false;
4894 else
4895 return rcStrict;
4896
4897 /* commit */
4898 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fAccessible;
4899
4900 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4901 return VINF_SUCCESS;
4902}
4903
4904
4905/**
4906 * Implements LAR and LSL with 64-bit operand size.
4907 *
4908 * @returns VINF_SUCCESS.
4909 * @param pu64Dst Pointer to the destination register.
4910 * @param uSel The selector to load details for.
4911 * @param fIsLar true = LAR, false = LSL.
4912 */
4913IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
4914{
4915 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4916
4917 /** @todo figure whether the accessed bit is set or not. */
4918
4919 bool fDescOk = true;
4920 IEMSELDESC Desc;
4921 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, true /*fAllowSysDesc*/, &Desc);
4922 if (rcStrict == VINF_SUCCESS)
4923 {
4924 /*
4925 * Check the descriptor type.
4926 */
4927 if (!Desc.Legacy.Gen.u1DescType)
4928 {
4929 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4930 {
4931 if (Desc.Long.Gen.u5Zeros)
4932 fDescOk = false;
4933 else
4934 switch (Desc.Long.Gen.u4Type)
4935 {
4936 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4937 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4938 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4939 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4940 break;
4941 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4942 fDescOk = fIsLar;
4943 break;
4944 default:
4945 fDescOk = false;
4946 break;
4947 }
4948 }
4949 else
4950 {
4951 switch (Desc.Long.Gen.u4Type)
4952 {
4953 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4954 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4955 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4956 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4957 case X86_SEL_TYPE_SYS_LDT:
4958 break;
4959 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4960 case X86_SEL_TYPE_SYS_TASK_GATE:
4961 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4962 fDescOk = fIsLar;
4963 break;
4964 default:
4965 fDescOk = false;
4966 break;
4967 }
4968 }
4969 }
4970 if (fDescOk)
4971 {
4972 /*
4973 * Check the RPL/DPL/CPL interaction.
4974 */
4975 /** @todo testcase for the conforming behavior. */
4976 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4977 || !Desc.Legacy.Gen.u1DescType)
4978 {
4979 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4980 fDescOk = false;
4981 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4982 fDescOk = false;
4983 }
4984 }
4985
4986 if (fDescOk)
4987 {
4988 /*
4989 * All fine, start committing the result.
4990 */
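            /* LAR: return the high descriptor dword with the base address bytes
               masked off (0x00ffff00 keeps the access rights byte plus the
               flags/limit-high nibble).  LSL: return the granularity-expanded limit. */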
4991 if (fIsLar)
4992 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4993 else
4994 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4995 }
4996
4997 }
4998 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4999 fDescOk = false;
5000 else
5001 return rcStrict;
5002
5003 /* commit flags value and advance rip. */
5004 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fDescOk;
5005 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5006
5007 return VINF_SUCCESS;
5008}
5009
5010
5011/**
5012 * Implements LAR and LSL with 16-bit operand size.
5013 *
5014 * @returns VINF_SUCCESS.
5015 * @param pu16Dst Pointer to the destination register.
5016 * @param uSel The selector to load details for.
5017 * @param fIsLar true = LAR, false = LSL.
5018 */
5019IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
5020{
5021 uint64_t u64TmpDst = *pu16Dst;
5022 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
5023 *pu16Dst = u64TmpDst;
5024 return VINF_SUCCESS;
5025}
5026
5027
5028/**
5029 * Implements lgdt.
5030 *
5031 * @param iEffSeg The segment of the new gdtr contents.
5032 * @param GCPtrEffSrc The address of the new gdtr contents.
5033 * @param enmEffOpSize The effective operand size.
5034 */
5035IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5036{
5037 if (pVCpu->iem.s.uCpl != 0)
5038 return iemRaiseGeneralProtectionFault0(pVCpu);
5039 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5040
5041 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5042 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5043 {
5044 Log(("lgdt: Guest intercept -> VM-exit\n"));
5045 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_LGDT, cbInstr);
5046 }
5047
5048 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
5049 {
5050 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
5051 IEM_SVM_UPDATE_NRIP(pVCpu);
5052 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5053 }
5054
5055 /*
5056 * Fetch the limit and base address.
5057 */
5058 uint16_t cbLimit;
5059 RTGCPTR GCPtrBase;
5060 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5061 if (rcStrict == VINF_SUCCESS)
5062 {
5063 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
5064 || X86_IS_CANONICAL(GCPtrBase))
5065 {
5066 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
5067 if (rcStrict == VINF_SUCCESS)
5068 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5069 }
5070 else
5071 {
5072 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5073 return iemRaiseGeneralProtectionFault0(pVCpu);
5074 }
5075 }
5076 return rcStrict;
5077}
5078
5079
5080/**
5081 * Implements sgdt.
5082 *
5083 * @param iEffSeg The segment where to store the gdtr content.
5084 * @param GCPtrEffDst The address where to store the gdtr content.
5085 */
5086IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5087{
5088 /*
5089 * Join paths with sidt.
5090 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5091 * you really must know.
5092 */
5093 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5094 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5095 {
5096 Log(("sgdt: Guest intercept -> VM-exit\n"));
5097 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_SGDT, cbInstr);
5098 }
5099
5100 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_READS))
5101 {
5102 Log(("sgdt: Guest intercept -> #VMEXIT\n"));
5103 IEM_SVM_UPDATE_NRIP(pVCpu);
5104 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5105 }
5106
5107 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
5108 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst);
5109 if (rcStrict == VINF_SUCCESS)
5110 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5111 return rcStrict;
5112}
5113
5114
5115/**
5116 * Implements lidt.
5117 *
5118 * @param iEffSeg The segment of the new idtr contents.
5119 * @param GCPtrEffSrc The address of the new idtr contents.
5120 * @param enmEffOpSize The effective operand size.
5121 */
5122IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5123{
5124 if (pVCpu->iem.s.uCpl != 0)
5125 return iemRaiseGeneralProtectionFault0(pVCpu);
5126 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5127
5128 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
5129 {
5130 Log(("lidt: Guest intercept -> #VMEXIT\n"));
5131 IEM_SVM_UPDATE_NRIP(pVCpu);
5132 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5133 }
5134
5135 /*
5136 * Fetch the limit and base address.
5137 */
5138 uint16_t cbLimit;
5139 RTGCPTR GCPtrBase;
5140 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5141 if (rcStrict == VINF_SUCCESS)
5142 {
5143 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
5144 || X86_IS_CANONICAL(GCPtrBase))
5145 {
5146 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
5147 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5148 }
5149 else
5150 {
5151 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5152 return iemRaiseGeneralProtectionFault0(pVCpu);
5153 }
5154 }
5155 return rcStrict;
5156}
5157
5158
5159/**
5160 * Implements sidt.
5161 *
5162 * @param iEffSeg The segment where to store the idtr content.
5163 * @param GCPtrEffDst The address where to store the idtr content.
5164 */
5165IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5166{
5167 /*
5168 * Join paths with sgdt.
5169 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5170 * you really must know.
5171 */
5172 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
5173 {
5174 Log(("sidt: Guest intercept -> #VMEXIT\n"));
5175 IEM_SVM_UPDATE_NRIP(pVCpu);
5176 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5177 }
5178
5179 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_IDTR);
5180 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.idtr.cbIdt, pVCpu->cpum.GstCtx.idtr.pIdt, iEffSeg, GCPtrEffDst);
5181 if (rcStrict == VINF_SUCCESS)
5182 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5183 return rcStrict;
5184}
5185
5186
5187/**
5188 * Implements lldt.
5189 *
5190 * @param uNewLdt The new LDT selector value.
5191 */
5192IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
5193{
5194 /*
5195 * Check preconditions.
5196 */
5197 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5198 {
5199 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
5200 return iemRaiseUndefinedOpcode(pVCpu);
5201 }
5202 if (pVCpu->iem.s.uCpl != 0)
5203 {
5204 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pVCpu->iem.s.uCpl));
5205 return iemRaiseGeneralProtectionFault0(pVCpu);
5206 }
5207 /* Nested-guest VMX intercept. */
5208 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5209 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5210 {
5211 Log(("lldt: Guest intercept -> VM-exit\n"));
5212 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LLDT, cbInstr);
5213 }
5214 if (uNewLdt & X86_SEL_LDT)
5215 {
5216 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
5217 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
5218 }
5219
5220 /*
5221 * Now, loading a NULL selector is easy.
5222 */
5223 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
5224 {
5225 /* Nested-guest SVM intercept. */
5226 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5227 {
5228 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5229 IEM_SVM_UPDATE_NRIP(pVCpu);
5230 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5231 }
5232
5233 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
5234 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_LDTR;
5235 CPUMSetGuestLDTR(pVCpu, uNewLdt);
5236 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
5237 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5238 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
5239 {
5240 /* AMD-V seems to leave the base and limit alone. */
5241 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
5242 }
5243 else
5244 {
5245 /* VT-x (Intel 3960x) seems to be doing the following. */
5246 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
5247 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
5248 pVCpu->cpum.GstCtx.ldtr.u32Limit = UINT32_MAX;
5249 }
5250
5251 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5252 return VINF_SUCCESS;
5253 }
5254
5255 /*
5256 * Read the descriptor.
5257 */
5258 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR);
5259 IEMSELDESC Desc;
5260 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
5261 if (rcStrict != VINF_SUCCESS)
5262 return rcStrict;
5263
5264 /* Check GPs first. */
5265 if (Desc.Legacy.Gen.u1DescType)
5266 {
5267 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5268 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5269 }
5270 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
5271 {
5272 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5273 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5274 }
5275 uint64_t u64Base;
5276 if (!IEM_IS_LONG_MODE(pVCpu))
5277 u64Base = X86DESC_BASE(&Desc.Legacy);
5278 else
5279 {
5280 if (Desc.Long.Gen.u5Zeros)
5281 {
5282 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
5283 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5284 }
5285
5286 u64Base = X86DESC64_BASE(&Desc.Long);
5287 if (!IEM_IS_CANONICAL(u64Base))
5288 {
5289 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
5290 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5291 }
5292 }
5293
5294 /* NP */
5295 if (!Desc.Legacy.Gen.u1Present)
5296 {
5297 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
5298 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
5299 }
5300
5301 /* Nested-guest SVM intercept. */
5302 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5303 {
5304 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5305 IEM_SVM_UPDATE_NRIP(pVCpu);
5306 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5307 }
5308
5309 /*
5310 * It checks out alright, update the registers.
5311 */
5312/** @todo check if the actual value is loaded or if the RPL is dropped */
5313 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5314 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
5315 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5316 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5317 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5318 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
5319
5320 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5321 return VINF_SUCCESS;
5322}
5323
5324
5325/**
5326 * Implements sldt GReg
5327 *
5328 * @param iGReg The general register to store the LDTR selector in.
5329 * @param enmEffOpSize The operand size.
5330 */
5331IEM_CIMPL_DEF_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5332{
5333 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5334 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5335 {
5336 Log(("sldt: Guest intercept -> VM-exit\n"));
5337 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_SLDT, cbInstr);
5338 }
5339
5340 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
5341
5342 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5343 switch (enmEffOpSize)
5344 {
5345 case IEMMODE_16BIT: *(uint16_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5346 case IEMMODE_32BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5347 case IEMMODE_64BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5348 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5349 }
5350 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5351 return VINF_SUCCESS;
5352}
5353
5354
5355/**
5356 * Implements sldt mem.
5357 *
5358 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5359 * @param GCPtrEffDst Where to store the 16-bit LDTR selector.
5360 */
5361IEM_CIMPL_DEF_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5362{
5363 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
5364
5365 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5366 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.ldtr.Sel);
5367 if (rcStrict == VINF_SUCCESS)
5368 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5369 return rcStrict;
5370}
5371
5372
5373/**
5374 * Implements ltr.
5375 *
5376 * @param uNewTr The new TSS selector value.
5377 */
5378IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
5379{
5380 /*
5381 * Check preconditions.
5382 */
5383 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5384 {
5385 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
5386 return iemRaiseUndefinedOpcode(pVCpu);
5387 }
5388 if (pVCpu->iem.s.uCpl != 0)
5389 {
5390 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pVCpu->iem.s.uCpl));
5391 return iemRaiseGeneralProtectionFault0(pVCpu);
5392 }
5393 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5394 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5395 {
5396 Log(("ltr: Guest intercept -> VM-exit\n"));
5397 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LTR, cbInstr);
5398 }
5399 if (uNewTr & X86_SEL_LDT)
5400 {
5401 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
5402 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
5403 }
5404 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
5405 {
5406 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
5407 return iemRaiseGeneralProtectionFault0(pVCpu);
5408 }
5409 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
5410 {
5411 Log(("ltr: Guest intercept -> #VMEXIT\n"));
5412 IEM_SVM_UPDATE_NRIP(pVCpu);
5413 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5414 }
5415
5416 /*
5417 * Read the descriptor.
5418 */
5419 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR);
5420 IEMSELDESC Desc;
5421 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
5422 if (rcStrict != VINF_SUCCESS)
5423 return rcStrict;
5424
5425 /* Check GPs first. */
5426 if (Desc.Legacy.Gen.u1DescType)
5427 {
5428 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5429 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5430 }
5431 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
5432 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5433 || IEM_IS_LONG_MODE(pVCpu)) )
5434 {
5435 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5436 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5437 }
5438 uint64_t u64Base;
5439 if (!IEM_IS_LONG_MODE(pVCpu))
5440 u64Base = X86DESC_BASE(&Desc.Legacy);
5441 else
5442 {
5443 if (Desc.Long.Gen.u5Zeros)
5444 {
5445 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
5446 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5447 }
5448
5449 u64Base = X86DESC64_BASE(&Desc.Long);
5450 if (!IEM_IS_CANONICAL(u64Base))
5451 {
5452 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5453 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5454 }
5455 }
5456
5457 /* NP */
5458 if (!Desc.Legacy.Gen.u1Present)
5459 {
5460 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5461 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5462 }
5463
5464 /*
5465 * Set it busy.
5466 * Note! Intel says this should lock down the whole descriptor, but we'll
5467 * restrict ourselves to 32-bit for now due to lack of inline
5468 * assembly and such.
5469 */
5470 void *pvDesc;
5471 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL),
5472 IEM_ACCESS_DATA_RW, 0);
5473 if (rcStrict != VINF_SUCCESS)
5474 return rcStrict;
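    /* The busy flag is bit 41 of the descriptor (bit 1 of the type field in
       byte 5).  The mapped address need not be 32-bit aligned, so the switch
       below picks an aligned base and rebases the bit index accordingly, e.g.
       for an address that is 3 mod 4: base + 1 is aligned and bit
       40 + 1 - 8 = 33 addresses the same descriptor bit. */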
5475 switch ((uintptr_t)pvDesc & 3)
5476 {
5477 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5478 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5479 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5480 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5481 }
5482 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
5483 if (rcStrict != VINF_SUCCESS)
5484 return rcStrict;
5485 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5486
5487 /*
5488 * It checks out alright, update the registers.
5489 */
5490/** @todo check if the actual value is loaded or if the RPL is dropped */
5491 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5492 pVCpu->cpum.GstCtx.tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5493 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
5494 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5495 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5496 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
5497
5498 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5499 return VINF_SUCCESS;
5500}
5501
5502
5503/**
5504 * Implements str GReg
5505 *
5506 * @param iGReg The general register to store the TR selector in.
5507 * @param enmEffOpSize The operand size.
5508 */
5509IEM_CIMPL_DEF_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5510{
5511 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5512 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5513 {
5514 Log(("str_reg: Guest intercept -> VM-exit\n"));
5515 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5516 }
5517
5518 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
5519
5520 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5521 switch (enmEffOpSize)
5522 {
5523 case IEMMODE_16BIT: *(uint16_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5524 case IEMMODE_32BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5525 case IEMMODE_64BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5526 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5527 }
5528 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5529 return VINF_SUCCESS;
5530}
5531
5532
5533/**
5534 * Implements str mem.
5535 *
5536 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5537 * @param GCPtrEffDst Where to store the 16-bit TR selector.
5538 */
5539IEM_CIMPL_DEF_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5540{
5541 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5542 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5543 {
5544 Log(("str_mem: Guest intercept -> VM-exit\n"));
5545 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5546 }
5547
5548 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
5549
5550 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5551 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.tr.Sel);
5552 if (rcStrict == VINF_SUCCESS)
5553 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5554 return rcStrict;
5555}
5556
5557
5558/**
5559 * Implements mov GReg,CRx.
5560 *
5561 * @param iGReg The general register to store the CRx value in.
5562 * @param iCrReg The CRx register to read (valid).
5563 */
5564IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5565{
5566 if (pVCpu->iem.s.uCpl != 0)
5567 return iemRaiseGeneralProtectionFault0(pVCpu);
5568 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5569
5570 if (IEM_SVM_IS_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5571 {
5572 Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
5573 IEM_SVM_UPDATE_NRIP(pVCpu);
5574 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5575 }
5576
5577 /* Read it. */
5578 uint64_t crX;
5579 switch (iCrReg)
5580 {
5581 case 0:
5582 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5583 crX = pVCpu->cpum.GstCtx.cr0;
5584 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5585 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5586 break;
5587 case 2:
5588 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR2);
5589 crX = pVCpu->cpum.GstCtx.cr2;
5590 break;
5591 case 3:
5592 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5593 crX = pVCpu->cpum.GstCtx.cr3;
5594 break;
5595 case 4:
5596 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5597 crX = pVCpu->cpum.GstCtx.cr4;
5598 break;
5599 case 8:
5600 {
5601 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
5602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5603 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5604 {
5605 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr8(pVCpu, iGReg, cbInstr);
5606 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5607 return rcStrict;
5608
5609 /*
5610 * If the Mov-from-CR8 doesn't cause a VM-exit, bits 7:4 of the VTPR is copied
5611 * to bits 0:3 of the destination operand. Bits 63:4 of the destination operand
5612 * are cleared.
5613 *
5614 * See Intel Spec. 29.3 "Virtualizing CR8-based TPR Accesses"
5615 */
5616 if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
5617 {
5618 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
5619 crX = (uTpr >> 4) & 0xf;
5620 break;
5621 }
5622 }
5623#endif
5624#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5625 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5626 {
5627 PCSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
5628 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
5629 {
5630 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf;
5631 break;
5632 }
5633 }
5634#endif
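            /* Architecturally CR8 is the task priority class, i.e. bits 7:4 of
               the 8-bit APIC TPR, hence the shift right by four here (and the
               matching shift left by four on the CR8 write path). */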
5635 uint8_t uTpr;
5636 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5637 if (RT_SUCCESS(rc))
5638 crX = uTpr >> 4;
5639 else
5640 crX = 0;
5641 break;
5642 }
5643 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5644 }
5645
5646#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5647 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5648 {
5649 switch (iCrReg)
5650 {
5651 /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
5652 case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); break;
5653 case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u); break;
5654
5655 case 3:
5656 {
5657 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr3(pVCpu, iGReg, cbInstr);
5658 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5659 return rcStrict;
5660 break;
5661 }
5662 }
5663 }
5664#endif
5665
5666 /* Store it. */
5667 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5668 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = crX;
5669 else
5670 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)crX;
5671
5672 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5673 return VINF_SUCCESS;
5674}
5675
5676
5677/**
5678 * Implements smsw GReg.
5679 *
5680 * @param iGReg The general register to store the CR0 (MSW) value in.
5681 * @param enmEffOpSize The operand size.
5682 */
5683IEM_CIMPL_DEF_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5684{
5685 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5686
5687#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5688 uint64_t u64MaskedCr0;
5689 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5690 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5691 else
5692 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5693 uint64_t const u64GuestCr0 = u64MaskedCr0;
5694#else
5695 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5696#endif
5697
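    /* For the 16-bit case the reserved MSW bits read as set on older CPUs:
       bits 15:5 on a 386 (0xffe0, ET exists) and bits 15:4 on a 286 (0xfff0,
       no ET), mirroring the CR0 read behaviour noted in iemCImpl_mov_Rd_Cd. */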
5698 switch (enmEffOpSize)
5699 {
5700 case IEMMODE_16BIT:
5701 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5702 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0;
5703 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5704 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0 | 0xffe0;
5705 else
5706 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0 | 0xfff0;
5707 break;
5708
5709 case IEMMODE_32BIT:
5710 *(uint32_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)u64GuestCr0;
5711 break;
5712
5713 case IEMMODE_64BIT:
5714 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = u64GuestCr0;
5715 break;
5716
5717 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5718 }
5719
5720 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5721 return VINF_SUCCESS;
5722}
5723
5724
5725/**
5726 * Implements smsw mem.
5727 *
5728 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5729 * @param GCPtrEffDst Where to store the 16-bit CR0 value.
5730 */
5731IEM_CIMPL_DEF_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5732{
5733 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5734
5735#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5736 uint64_t u64MaskedCr0;
5737 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5738 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5739 else
5740 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5741 uint64_t const u64GuestCr0 = u64MaskedCr0;
5742#else
5743 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5744#endif
5745
5746 uint16_t u16Value;
5747 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5748 u16Value = (uint16_t)u64GuestCr0;
5749 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5750 u16Value = (uint16_t)u64GuestCr0 | 0xffe0;
5751 else
5752 u16Value = (uint16_t)u64GuestCr0 | 0xfff0;
5753
5754 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
5755 if (rcStrict == VINF_SUCCESS)
5756 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5757 return rcStrict;
5758}
5759
5760
5761/**
5762 * Helper for mapping CR3 and PAE PDPEs for 'mov CRx,GReg'.
5763 */
5764#define IEM_MAP_PAE_PDPES_AT_CR3_RET(a_pVCpu, a_iCrReg, a_uCr3) \
5765 do \
5766 { \
5767 int const rcX = PGMGstMapPaePdpesAtCr3(a_pVCpu, a_uCr3); \
5768 if (RT_SUCCESS(rcX)) \
5769 { /* likely */ } \
5770 else \
5771 { \
5772 /* Either invalid PDPTEs or CR3 second-level translation failed. Raise #GP(0) either way. */ \
5773 Log(("iemCImpl_load_Cr%#x: Trying to load invalid PAE PDPEs\n", a_iCrReg)); \
5774 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
5775 } \
5776 } while (0)
5777
5778
5779/**
5780 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
5781 *
5782 * @param iCrReg The CRx register to write (valid).
5783 * @param uNewCrX The new value.
5784 * @param enmAccessCrX The instruction that caused the CrX load.
5785 * @param iGReg The general register in case of a 'mov CRx,GReg'
5786 * instruction.
5787 */
5788IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5789{
5790 VBOXSTRICTRC rcStrict;
5791 int rc;
5792#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
5793 RT_NOREF2(iGReg, enmAccessCrX);
5794#endif
5795
5796 /*
5797 * Try store it.
5798 * Unfortunately, CPUM only does a tiny bit of the work.
5799 */
5800 switch (iCrReg)
5801 {
5802 case 0:
5803 {
5804 /*
5805 * Perform checks.
5806 */
5807 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5808
5809 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr0;
5810 uint32_t const fValid = CPUMGetGuestCR0ValidMask();
5811
5812 /* ET is hardcoded on 486 and later. */
5813 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5814 uNewCrX |= X86_CR0_ET;
5815 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5816 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5817 {
5818 uNewCrX &= fValid;
5819 uNewCrX |= X86_CR0_ET;
5820 }
5821 else
5822 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5823
5824 /* Check for reserved bits. */
5825 if (uNewCrX & ~(uint64_t)fValid)
5826 {
5827 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5828 return iemRaiseGeneralProtectionFault0(pVCpu);
5829 }
5830
5831 /* Check for invalid combinations. */
5832 if ( (uNewCrX & X86_CR0_PG)
5833 && !(uNewCrX & X86_CR0_PE) )
5834 {
5835 Log(("Trying to set CR0.PG without CR0.PE\n"));
5836 return iemRaiseGeneralProtectionFault0(pVCpu);
5837 }
5838
5839 if ( !(uNewCrX & X86_CR0_CD)
5840 && (uNewCrX & X86_CR0_NW) )
5841 {
5842 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
5843 return iemRaiseGeneralProtectionFault0(pVCpu);
5844 }
5845
5846 if ( !(uNewCrX & X86_CR0_PG)
5847 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE))
5848 {
5849 Log(("Trying to clear CR0.PG while leaving CR4.PCID set\n"));
5850 return iemRaiseGeneralProtectionFault0(pVCpu);
5851 }
5852
5853 /* Long mode consistency checks. */
5854 if ( (uNewCrX & X86_CR0_PG)
5855 && !(uOldCrX & X86_CR0_PG)
5856 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5857 {
5858 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE))
5859 {
5860 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
5861 return iemRaiseGeneralProtectionFault0(pVCpu);
5862 }
5863 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long)
5864 {
5865 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
5866 return iemRaiseGeneralProtectionFault0(pVCpu);
5867 }
5868 }
5869
5870 /* Check for bits that must remain set or cleared in VMX operation,
5871 see Intel spec. 23.8 "Restrictions on VMX operation". */
5872 if (IEM_VMX_IS_ROOT_MODE(pVCpu))
5873 {
5874#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5875 uint64_t const uCr0Fixed0 = IEM_VMX_IS_NON_ROOT_MODE(pVCpu) ? iemVmxGetCr0Fixed0(pVCpu) : VMX_V_CR0_FIXED0;
5876#else
5877 uint64_t const uCr0Fixed0 = VMX_V_CR0_FIXED0;
5878#endif
5879 if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
5880 {
5881 Log(("Trying to clear reserved CR0 bits in VMX operation: NewCr0=%#llx MB1=%#llx\n", uNewCrX, uCr0Fixed0));
5882 return iemRaiseGeneralProtectionFault0(pVCpu);
5883 }
5884
5885 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
5886 if (uNewCrX & ~uCr0Fixed1)
5887 {
5888 Log(("Trying to set reserved CR0 bits in VMX operation: NewCr0=%#llx MB0=%#llx\n", uNewCrX, uCr0Fixed1));
5889 return iemRaiseGeneralProtectionFault0(pVCpu);
5890 }
5891 }
5892
5893 /*
5894 * SVM nested-guest CR0 write intercepts.
5895 */
5896 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
5897 {
5898 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5899 IEM_SVM_UPDATE_NRIP(pVCpu);
5900 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
5901 }
5902 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5903 {
5904 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
5905 if ( enmAccessCrX == IEMACCESSCRX_LMSW
5906 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
5907 {
5908 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
5909 Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
5910 IEM_SVM_UPDATE_NRIP(pVCpu);
5911 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
5912 }
5913 }
5914
5915 /*
5916 * Change EFER.LMA if entering or leaving long mode.
5917 */
5918 uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER;
5919 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
5920 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5921 {
5922 if (uNewCrX & X86_CR0_PG)
5923 NewEFER |= MSR_K6_EFER_LMA;
5924 else
5925 NewEFER &= ~MSR_K6_EFER_LMA;
5926
5927 CPUMSetGuestEFER(pVCpu, NewEFER);
5928 Assert(pVCpu->cpum.GstCtx.msrEFER == NewEFER);
5929 }
5930
5931 /*
5932 * Inform PGM.
5933 */
5934 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW))
5935 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW)) )
5936 {
5937 if ( enmAccessCrX != IEMACCESSCRX_MOV_CRX
5938 || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
5939 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5940 { /* likely */ }
5941 else
5942 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
5943 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
5944 AssertRCReturn(rc, rc);
5945 /* ignore informational status codes */
5946 }
5947
5948 /*
5949 * Change CR0.
5950 */
5951 CPUMSetGuestCR0(pVCpu, uNewCrX);
5952 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
5953
5954 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
5955 false /* fForce */);
5956 break;
5957 }
5958
5959 /*
5960 * CR2 can be changed without any restrictions.
5961 */
5962 case 2:
5963 {
5964 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
5965 {
5966 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5967 IEM_SVM_UPDATE_NRIP(pVCpu);
5968 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
5969 }
5970 pVCpu->cpum.GstCtx.cr2 = uNewCrX;
5971 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_CR2;
5972 rcStrict = VINF_SUCCESS;
5973 break;
5974 }
5975
5976 /*
5977 * CR3 is relatively simple, although AMD and Intel have different
5978 * accounts of how setting reserved bits is handled. We take Intel's
5979 * word for the lower bits and AMD's for the high bits (63:52). The
5980 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
5981 * on this.
5982 */
5983 /** @todo Testcase: Setting reserved bits in CR3, especially before
5984 * enabling paging. */
5985 case 3:
5986 {
5987 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5988
5989 /* Bit 63 being clear in the source operand with PCIDE indicates no invalidations are required. */
5990 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE)
5991 && (uNewCrX & RT_BIT_64(63)))
5992 {
5993 /** @todo r=ramshankar: avoiding a TLB flush altogether here causes Windows 10
5994 * SMP(w/o nested-paging) to hang during bootup on Skylake systems, see
5995 * Intel spec. 4.10.4.1 "Operations that Invalidate TLBs and
5996 * Paging-Structure Caches". */
5997 uNewCrX &= ~RT_BIT_64(63);
5998 }
5999
6000 /* Check / mask the value. */
6001#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6002 /* See Intel spec. 27.2.2 "EPT Translation Mechanism" footnote. */
6003 uint64_t const fInvPhysMask = !CPUMIsGuestVmxEptPagingEnabledEx(IEM_GET_CTX(pVCpu))
6004 ? (UINT64_MAX << IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
6005 : (~X86_CR3_EPT_PAGE_MASK & X86_PAGE_4K_BASE_MASK);
6006#else
6007 uint64_t const fInvPhysMask = UINT64_C(0xfff0000000000000);
6008#endif
6009 if (uNewCrX & fInvPhysMask)
6010 {
6011 /** @todo Should we raise this only for 64-bit mode like Intel claims? AMD is
6012 * very vague in this area. As mentioned above, need testcase on real
6013 * hardware... Sigh. */
6014 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
6015 return iemRaiseGeneralProtectionFault0(pVCpu);
6016 }
6017
6018 uint64_t fValid;
6019 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6020 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME))
6021 {
6022 /** @todo Redundant? This value has already been validated above. */
6023 fValid = UINT64_C(0x000fffffffffffff);
6024 }
6025 else
6026 fValid = UINT64_C(0xffffffff);
6027 if (uNewCrX & ~fValid)
6028 {
6029 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
6030 uNewCrX, uNewCrX & ~fValid));
6031 uNewCrX &= fValid;
6032 }
6033
6034 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
6035 {
6036 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6037 IEM_SVM_UPDATE_NRIP(pVCpu);
6038 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
6039 }
6040
6041 /* Inform PGM. */
6042 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
6043 {
6044 if ( !CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu))
6045 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6046 { /* likely */ }
6047 else
6048 {
6049 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6050 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX);
6051 }
6052 rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
6053 AssertRCReturn(rc, rc);
6054 /* ignore informational status codes */
6055 }
6056
6057 /* Make the change. */
6058 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
6059 AssertRCSuccessReturn(rc, rc);
6060
6061 rcStrict = VINF_SUCCESS;
6062 break;
6063 }
6064
6065 /*
6066 * CR4 is a bit more tedious as there are bits which cannot be cleared
6067 * under some circumstances and such.
6068 */
6069 case 4:
6070 {
6071 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6072 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr4;
6073
6074 /* Reserved bits. */
6075 uint32_t const fValid = CPUMGetGuestCR4ValidMask(pVCpu->CTX_SUFF(pVM));
6076 if (uNewCrX & ~(uint64_t)fValid)
6077 {
6078 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
6079 return iemRaiseGeneralProtectionFault0(pVCpu);
6080 }
6081
6082 bool const fPcide = !(uOldCrX & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE);
6083 bool const fLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
6084
6085 /* PCIDE check. */
6086 if ( fPcide
6087 && ( !fLongMode
6088 || (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))))
6089 {
6090 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))));
6091 return iemRaiseGeneralProtectionFault0(pVCpu);
6092 }
6093
6094 /* PAE check. */
6095 if ( fLongMode
6096 && (uOldCrX & X86_CR4_PAE)
6097 && !(uNewCrX & X86_CR4_PAE))
6098 {
6099 Log(("Trying to clear CR4.PAE while long mode is active\n"));
6100 return iemRaiseGeneralProtectionFault0(pVCpu);
6101 }
6102
6103 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
6104 {
6105 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6106 IEM_SVM_UPDATE_NRIP(pVCpu);
6107 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
6108 }
6109
6110 /* Check for bits that must remain set or cleared in VMX operation,
6111 see Intel spec. 23.8 "Restrictions on VMX operation". */
6112 if (IEM_VMX_IS_ROOT_MODE(pVCpu))
6113 {
6114 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
6115 if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
6116 {
6117 Log(("Trying to clear reserved CR4 bits in VMX operation: NewCr4=%#llx MB1=%#llx\n", uNewCrX, uCr4Fixed0));
6118 return iemRaiseGeneralProtectionFault0(pVCpu);
6119 }
6120
6121 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
6122 if (uNewCrX & ~uCr4Fixed1)
6123 {
6124 Log(("Trying to set reserved CR4 bits in VMX operation: NewCr4=%#llx MB0=%#llx\n", uNewCrX, uCr4Fixed1));
6125 return iemRaiseGeneralProtectionFault0(pVCpu);
6126 }
6127 }
6128
6129 /*
6130 * Notify PGM.
6131 */
6132 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
6133 {
6134 if ( !CPUMIsPaePagingEnabled(pVCpu->cpum.GstCtx.cr0, uNewCrX, pVCpu->cpum.GstCtx.msrEFER)
6135 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6136 { /* likely */ }
6137 else
6138 {
6139 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6140 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6141 }
6142 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6143 AssertRCReturn(rc, rc);
6144 /* ignore informational status codes */
6145 }
6146
6147 /*
6148 * Change it.
6149 */
6150 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
6151 AssertRCSuccessReturn(rc, rc);
6152 Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX);
6153
6154 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6155 false /* fForce */);
6156 break;
6157 }
6158
6159 /*
6160 * CR8 maps to the APIC TPR.
6161 */
6162 case 8:
6163 {
6164 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
6165 if (uNewCrX & ~(uint64_t)0xf)
6166 {
6167 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
6168 return iemRaiseGeneralProtectionFault0(pVCpu);
6169 }
6170
6171#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6172 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6173 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
6174 {
6175 /*
6176 * If the Mov-to-CR8 doesn't cause a VM-exit, bits 0:3 of the source operand
6177 * is copied to bits 7:4 of the VTPR. Bits 0:3 and bits 31:8 of the VTPR are
6178 * cleared. Following this the processor performs TPR virtualization.
6179 *
6180 * However, we should not perform TPR virtualization immediately here but
6181 * after this instruction has completed.
6182 *
6183 * See Intel spec. 29.3 "Virtualizing CR8-based TPR Accesses"
6184 * See Intel spec. 27.1 "Architectural State Before A VM-exit"
6185 */
6186 uint32_t const uTpr = (uNewCrX & 0xf) << 4;
6187 Log(("iemCImpl_load_Cr%#x: Virtualizing TPR (%#x) write\n", iCrReg, uTpr));
6188 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
6189 iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR);
6190 rcStrict = VINF_SUCCESS;
6191 break;
6192 }
6193#endif
6194
6195#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6196 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6197 {
6198 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
6199 {
6200 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6201 IEM_SVM_UPDATE_NRIP(pVCpu);
6202 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
6203 }
6204
6205 pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VTPR = uNewCrX;
6206 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
6207 {
6208 rcStrict = VINF_SUCCESS;
6209 break;
6210 }
6211 }
6212#endif
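            /* The 4-bit CR8 value becomes TPR bits 7:4 (the priority class). */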
6213 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
6214 APICSetTpr(pVCpu, u8Tpr);
6215 rcStrict = VINF_SUCCESS;
6216 break;
6217 }
6218
6219 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6220 }
6221
6222 /*
6223 * Advance the RIP on success.
6224 */
6225 if (RT_SUCCESS(rcStrict))
6226 {
6227 if (rcStrict != VINF_SUCCESS)
6228 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6229 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6230 }
6231
6232 return rcStrict;
6233}
6234
6235
6236/**
6237 * Implements mov CRx,GReg.
6238 *
6239 * @param iCrReg The CRx register to write (valid).
6240 * @param iGReg The general register to load the CRx value from.
6241 */
6242IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
6243{
6244 if (pVCpu->iem.s.uCpl != 0)
6245 return iemRaiseGeneralProtectionFault0(pVCpu);
6246 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6247
6248 /*
6249 * Read the new value from the source register and call common worker.
6250 */
6251 uint64_t uNewCrX;
6252 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6253 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
6254 else
6255 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
6256
6257#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6258 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6259 {
6260 VBOXSTRICTRC rcStrict = VINF_VMX_INTERCEPT_NOT_ACTIVE;
6261 switch (iCrReg)
6262 {
6263 case 0:
6264 case 4: rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr); break;
6265 case 3: rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr); break;
6266 case 8: rcStrict = iemVmxVmexitInstrMovToCr8(pVCpu, iGReg, cbInstr); break;
6267 }
6268 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6269 return rcStrict;
6270 }
6271#endif
6272
6273 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
6274}
6275
6276
6277/**
6278 * Implements 'LMSW r/m16'
6279 *
6280 * @param u16NewMsw The new value.
6281 * @param GCPtrEffDst The guest-linear address of the source operand in case
6282 * of a memory operand. For register operand, pass
6283 * NIL_RTGCPTR.
6284 */
6285IEM_CIMPL_DEF_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst)
6286{
6287 if (pVCpu->iem.s.uCpl != 0)
6288 return iemRaiseGeneralProtectionFault0(pVCpu);
6289 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6290 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6291
6292#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6293 /* Check nested-guest VMX intercept and get updated MSW if there's no VM-exit. */
6294 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6295 {
6296 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrLmsw(pVCpu, pVCpu->cpum.GstCtx.cr0, &u16NewMsw, GCPtrEffDst, cbInstr);
6297 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6298 return rcStrict;
6299 }
6300#else
6301 RT_NOREF_PV(GCPtrEffDst);
6302#endif
6303
6304 /*
6305 * Compose the new CR0 value and call common worker.
6306 */
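    /* Note that X86_CR0_PE is absent from the cleared mask but present in the
       OR mask, so LMSW can set PE but never clear an already set PE, matching
       the architected LMSW behaviour. */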
6307 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6308 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6309 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
6310}
6311
6312
6313/**
6314 * Implements 'CLTS'.
6315 */
6316IEM_CIMPL_DEF_0(iemCImpl_clts)
6317{
6318 if (pVCpu->iem.s.uCpl != 0)
6319 return iemRaiseGeneralProtectionFault0(pVCpu);
6320
6321 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6322 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0;
6323 uNewCr0 &= ~X86_CR0_TS;
6324
6325#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6326 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6327 {
6328 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrClts(pVCpu, cbInstr);
6329 if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
6330 uNewCr0 |= (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS);
6331 else if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6332 return rcStrict;
6333 }
6334#endif
6335
6336 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
6337}
6338
6339
6340/**
6341 * Implements mov GReg,DRx.
6342 *
6343 * @param iGReg The general register to store the DRx value in.
6344 * @param iDrReg The DRx register to read (0-7).
6345 */
6346IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
6347{
6348#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6349 /*
6350 * Check nested-guest VMX intercept.
6351 * Unlike most other intercepts, the Mov DRx intercept takes precedence
6352 * over CPL and CR4.DE and even DR4/DR5 checks.
6353 *
6354 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6355 */
6356 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6357 {
6358 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_FROM_DRX, iDrReg, iGReg, cbInstr);
6359 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6360 return rcStrict;
6361 }
6362#endif
6363
6364 /*
6365 * Check preconditions.
6366 */
6367 /* Raise GPs. */
6368 if (pVCpu->iem.s.uCpl != 0)
6369 return iemRaiseGeneralProtectionFault0(pVCpu);
6370 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6371 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR0);
6372
6373 if ( (iDrReg == 4 || iDrReg == 5)
6374 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE) )
6375 {
6376 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
6377 return iemRaiseGeneralProtectionFault0(pVCpu);
6378 }
6379
6380 /* Raise #DB if general access detect is enabled. */
6381 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6382 {
6383 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
6384 return iemRaiseDebugException(pVCpu);
6385 }
6386
6387 /*
6388 * Read the debug register and store it in the specified general register.
6389 */
6390 uint64_t drX;
6391 switch (iDrReg)
6392 {
6393 case 0:
6394 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6395 drX = pVCpu->cpum.GstCtx.dr[0];
6396 break;
6397 case 1:
6398 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6399 drX = pVCpu->cpum.GstCtx.dr[1];
6400 break;
6401 case 2:
6402 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6403 drX = pVCpu->cpum.GstCtx.dr[2];
6404 break;
6405 case 3:
6406 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6407 drX = pVCpu->cpum.GstCtx.dr[3];
6408 break;
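        /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear; the CR4.DE=1 case
           was already rejected with #GP(0) above. */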
6409 case 6:
6410 case 4:
6411 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6412 drX = pVCpu->cpum.GstCtx.dr[6];
6413 drX |= X86_DR6_RA1_MASK;
6414 drX &= ~X86_DR6_RAZ_MASK;
6415 break;
6416 case 7:
6417 case 5:
6418 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6419 drX = pVCpu->cpum.GstCtx.dr[7];
6420 drX |= X86_DR7_RA1_MASK;
6421 drX &= ~X86_DR7_RAZ_MASK;
6422 break;
6423 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6424 }
6425
6426 /** @todo SVM nested-guest intercept for DR8-DR15? */
6427 /*
6428 * Check for any SVM nested-guest intercepts for the DRx read.
6429 */
6430 if (IEM_SVM_IS_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
6431 {
6432 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
6433 IEM_SVM_UPDATE_NRIP(pVCpu);
6434 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
6435 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6436 }
6437
6438 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6439 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
6440 else
6441 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)drX;
6442
6443 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6444 return VINF_SUCCESS;
6445}
6446
6447
6448/**
6449 * Implements mov DRx,GReg.
6450 *
6451 * @param iDrReg The DRx register to write (valid).
6452 * @param iGReg The general register to load the DRx value from.
6453 */
6454IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
6455{
6456#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6457 /*
6458 * Check nested-guest VMX intercept.
6459 * Unlike most other intercepts, the Mov DRx intercept takes precedence
6460 * over CPL and CR4.DE and even DR4/DR5 checks.
6461 *
6462 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6463 */
6464 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6465 {
6466 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_TO_DRX, iDrReg, iGReg, cbInstr);
6467 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6468 return rcStrict;
6469 }
6470#endif
6471
6472 /*
6473 * Check preconditions.
6474 */
6475 if (pVCpu->iem.s.uCpl != 0)
6476 return iemRaiseGeneralProtectionFault0(pVCpu);
6477 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6478 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR4);
6479
6480 if (iDrReg == 4 || iDrReg == 5)
6481 {
6482 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6483 {
6484 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
6485 return iemRaiseGeneralProtectionFault0(pVCpu);
6486 }
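        /* Remap DR4/DR5 to DR6/DR7 (they are aliases when CR4.DE is clear). */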
6487 iDrReg += 2;
6488 }
6489
6490 /* Raise #DB if general access detect is enabled. */
6491 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
6492 * \#GP? */
6493 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6494 {
6495 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
6496 return iemRaiseDebugException(pVCpu);
6497 }
6498
6499 /*
6500 * Read the new value from the source register.
6501 */
6502 uint64_t uNewDrX;
6503 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6504 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
6505 else
6506 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
6507
6508 /*
6509 * Adjust it.
6510 */
6511 switch (iDrReg)
6512 {
6513 case 0:
6514 case 1:
6515 case 2:
6516 case 3:
6517 /* nothing to adjust */
6518 break;
6519
6520 case 6:
6521 if (uNewDrX & X86_DR6_MBZ_MASK)
6522 {
6523 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6524 return iemRaiseGeneralProtectionFault0(pVCpu);
6525 }
6526 uNewDrX |= X86_DR6_RA1_MASK;
6527 uNewDrX &= ~X86_DR6_RAZ_MASK;
6528 break;
6529
6530 case 7:
6531 if (uNewDrX & X86_DR7_MBZ_MASK)
6532 {
6533 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6534 return iemRaiseGeneralProtectionFault0(pVCpu);
6535 }
6536 uNewDrX |= X86_DR7_RA1_MASK;
6537 uNewDrX &= ~X86_DR7_RAZ_MASK;
6538 break;
6539
6540 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6541 }
6542
6543 /** @todo SVM nested-guest intercept for DR8-DR15? */
6544 /*
6545 * Check for any SVM nested-guest intercepts for the DRx write.
6546 */
6547 if (IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
6548 {
6549 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
6550 IEM_SVM_UPDATE_NRIP(pVCpu);
6551 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
6552 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6553 }
6554
6555 /*
6556 * Do the actual setting.
6557 */
6558 if (iDrReg < 4)
6559 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6560 else if (iDrReg == 6)
6561 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6562
6563 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
6564 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
6565
6566 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6567 return VINF_SUCCESS;
6568}
6569
6570
6571/**
6572 * Implements mov GReg,TRx.
6573 *
6574 * @param iGReg The general register to store the
6575 * TRx value in.
6576 * @param iTrReg The TRx register to read (6/7).
6577 */
6578IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg)
6579{
6580 /*
6581 * Check preconditions. NB: This instruction is 386/486 only.
6582 */
6583
6584 /* Raise GPs. */
6585 if (pVCpu->iem.s.uCpl != 0)
6586 return iemRaiseGeneralProtectionFault0(pVCpu);
6587 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6588
6589 if (iTrReg < 6 || iTrReg > 7)
6590 {
6591 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6592 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6593 return iemRaiseGeneralProtectionFault0(pVCpu);
6594 }
6595
6596 /*
6597 * Read the test register and store it in the specified general register.
6598 * This is currently a dummy implementation that only exists to satisfy
6599 * old debuggers like WDEB386 or OS/2 KDB which unconditionally read the
6600 * TR6/TR7 registers. Software which actually depends on the TR values
6601 * (different on 386/486) is exceedingly rare.
6602 */
6603 uint64_t trX;
6604 switch (iTrReg)
6605 {
6606 case 6:
6607 trX = 0; /* Currently a dummy. */
6608 break;
6609 case 7:
6610 trX = 0; /* Currently a dummy. */
6611 break;
6612 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6613 }
6614
6615 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)trX;
6616
6617 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6618 return VINF_SUCCESS;
6619}
6620
6621
6622/**
6623 * Implements mov TRx,GReg.
6624 *
6625 * @param iTrReg The TRx register to write (valid).
6626 * @param iGReg The general register to load the TRx
6627 * value from.
6628 */
6629IEM_CIMPL_DEF_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg)
6630{
6631 /*
6632 * Check preconditions. NB: This instruction is 386/486 only.
6633 */
6634
6635 /* Raise GPs. */
6636 if (pVCpu->iem.s.uCpl != 0)
6637 return iemRaiseGeneralProtectionFault0(pVCpu);
6638 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6639
6640 if (iTrReg < 6 || iTrReg > 7)
6641 {
6642 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6643 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6644 return iemRaiseGeneralProtectionFault0(pVCpu);
6645 }
6646
6647 /*
6648 * Read the new value from the source register.
6649 */
6650 uint64_t uNewTrX;
6651 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6652 uNewTrX = iemGRegFetchU64(pVCpu, iGReg);
6653 else
6654 uNewTrX = iemGRegFetchU32(pVCpu, iGReg);
6655
6656 /*
6657 * Here we would do the actual setting if this weren't a dummy implementation.
6658 * This is currently a dummy implementation that only exists to prevent
6659 * old debuggers like WDEB386 or OS/2 KDB from crashing.
6660 */
6661 RT_NOREF(uNewTrX);
6662
6663 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6664 return VINF_SUCCESS;
6665}
6666
6667
6668/**
6669 * Implements 'INVLPG m'.
6670 *
6671 * @param GCPtrPage The effective address of the page to invalidate.
6672 * @remarks Updates the RIP.
6673 */
6674IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
6675{
6676 /* ring-0 only. */
6677 if (pVCpu->iem.s.uCpl != 0)
6678 return iemRaiseGeneralProtectionFault0(pVCpu);
6679 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6680 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6681
6682#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6683 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6684 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6685 {
6686 Log(("invlpg: Guest intercept (%RGp) -> VM-exit\n", GCPtrPage));
6687 return iemVmxVmexitInstrInvlpg(pVCpu, GCPtrPage, cbInstr);
6688 }
6689#endif
6690
6691 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
6692 {
6693 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
6694 IEM_SVM_UPDATE_NRIP(pVCpu);
6695 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPG,
6696 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
6697 }
6698
6699 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
6700 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6701
6702 if (rc == VINF_SUCCESS)
6703 return VINF_SUCCESS;
6704 if (rc == VINF_PGM_SYNC_CR3)
6705 return iemSetPassUpStatus(pVCpu, rc);
6706
6707 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6708 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
6709 return rc;
6710}
6711
6712
6713/**
6714 * Implements INVPCID.
6715 *
6716 * @param iEffSeg The segment of the invpcid descriptor.
6717 * @param GCPtrInvpcidDesc The address of invpcid descriptor.
6718 * @param uInvpcidType The invalidation type.
6719 * @remarks Updates the RIP.
6720 */
6721IEM_CIMPL_DEF_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType)
6722{
6723 /*
6724 * Check preconditions.
6725 */
6726 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fInvpcid)
6727 return iemRaiseUndefinedOpcode(pVCpu);
6728
6729 /* When in VMX non-root mode and INVPCID is not enabled, it results in #UD. */
6730 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6731 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_INVPCID))
6732 {
6733 Log(("invpcid: Not enabled for nested-guest execution -> #UD\n"));
6734 return iemRaiseUndefinedOpcode(pVCpu);
6735 }
6736
6737 if (pVCpu->iem.s.uCpl != 0)
6738 {
6739 Log(("invpcid: CPL != 0 -> #GP(0)\n"));
6740 return iemRaiseGeneralProtectionFault0(pVCpu);
6741 }
6742
6743 if (IEM_IS_V86_MODE(pVCpu))
6744 {
6745 Log(("invpcid: v8086 mode -> #GP(0)\n"));
6746 return iemRaiseGeneralProtectionFault0(pVCpu);
6747 }
6748
6749 /*
6750 * Check nested-guest intercept.
6751 *
6752 * INVPCID causes a VM-exit if "enable INVPCID" and "INVLPG exiting" are
6753 * both set. We have already checked the former earlier in this function.
6754 *
6755 * CPL and virtual-8086 mode checks take priority over this VM-exit.
6756 * See Intel spec. "25.1.1 Relative Priority of Faults and VM Exits".
6757 */
6758 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6759 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6760 {
6761 Log(("invpcid: Guest intercept -> #VM-exit\n"));
6762 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_INVPCID, VMXINSTRID_NONE, cbInstr);
6763 }
6764
6765 if (uInvpcidType > X86_INVPCID_TYPE_MAX_VALID)
6766 {
6767 Log(("invpcid: invalid/unrecognized invpcid type %#RX64 -> #GP(0)\n", uInvpcidType));
6768 return iemRaiseGeneralProtectionFault0(pVCpu);
6769 }
6770 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6771
6772 /*
6773 * Fetch the invpcid descriptor from guest memory.
6774 */
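    /* The 16-byte invpcid descriptor layout (see Intel SDM): bits 0..11 hold the
       PCID, bits 12..63 are reserved (must be zero) and bits 64..127 hold the
       linear address used by the individual-address invalidation type. */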
6775 RTUINT128U uDesc;
6776 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
6777 if (rcStrict == VINF_SUCCESS)
6778 {
6779 /*
6780 * Validate the descriptor.
6781 */
6782 if (uDesc.s.Lo > 0xfff)
6783 {
6784 Log(("invpcid: reserved bits set in invpcid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
6785 return iemRaiseGeneralProtectionFault0(pVCpu);
6786 }
6787
6788 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
6789 uint16_t const uPcid = uDesc.s.Lo & UINT64_C(0xfff); /* PCID is 12 bits wide, so don't truncate it. */
6790 uint32_t const uCr4 = pVCpu->cpum.GstCtx.cr4;
6791 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
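        /* All invalidation types below are currently implemented as a full
           (optionally global) TLB flush via PGMFlushTLB; invalidating more than
           requested is architecturally permitted. */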
6792 switch (uInvpcidType)
6793 {
6794 case X86_INVPCID_TYPE_INDV_ADDR:
6795 {
6796 if (!IEM_IS_CANONICAL(GCPtrInvAddr))
6797 {
6798 Log(("invpcid: invalidation address %#RGP is not canonical -> #GP(0)\n", GCPtrInvAddr));
6799 return iemRaiseGeneralProtectionFault0(pVCpu);
6800 }
6801 if ( !(uCr4 & X86_CR4_PCIDE)
6802 && uPcid != 0)
6803 {
6804 Log(("invpcid: invalid pcid %#x\n", uPcid));
6805 return iemRaiseGeneralProtectionFault0(pVCpu);
6806 }
6807
6808 /* Invalidate mappings for the linear address tagged with PCID except global translations. */
6809 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6810 break;
6811 }
6812
6813 case X86_INVPCID_TYPE_SINGLE_CONTEXT:
6814 {
6815 if ( !(uCr4 & X86_CR4_PCIDE)
6816 && uPcid != 0)
6817 {
6818 Log(("invpcid: invalid pcid %#x\n", uPcid));
6819 return iemRaiseGeneralProtectionFault0(pVCpu);
6820 }
6821 /* Invalidate all mappings associated with PCID except global translations. */
6822 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6823 break;
6824 }
6825
6826 case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
6827 {
6828 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
6829 break;
6830 }
6831
6832 case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
6833 {
6834 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6835 break;
6836 }
6837 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6838 }
6839 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6840 }
6841 return rcStrict;
6842}
6843
6844
6845/**
6846 * Implements INVD.
6847 */
6848IEM_CIMPL_DEF_0(iemCImpl_invd)
6849{
6850 if (pVCpu->iem.s.uCpl != 0)
6851 {
6852 Log(("invd: CPL != 0 -> #GP(0)\n"));
6853 return iemRaiseGeneralProtectionFault0(pVCpu);
6854 }
6855
6856 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6857 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_INVD, cbInstr);
6858
6859 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
6860
6861 /* We currently take no action here. */
6862 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6863 return VINF_SUCCESS;
6864}
6865
6866
6867/**
6868 * Implements WBINVD.
6869 */
6870IEM_CIMPL_DEF_0(iemCImpl_wbinvd)
6871{
6872 if (pVCpu->iem.s.uCpl != 0)
6873 {
6874 Log(("wbinvd: CPL != 0 -> #GP(0)\n"));
6875 return iemRaiseGeneralProtectionFault0(pVCpu);
6876 }
6877
6878 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6879 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WBINVD, cbInstr);
6880
6881 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
6882
6883 /* We currently take no action here. */
6884 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6885 return VINF_SUCCESS;
6886}
6887
6888
6889/** Implements 'RSM' (opcode 0x0f 0xaa); currently this simply raises \#UD. */
6890IEM_CIMPL_DEF_0(iemCImpl_rsm)
6891{
6892 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
6893 NOREF(cbInstr);
6894 return iemRaiseUndefinedOpcode(pVCpu);
6895}
6896
6897
6898/**
6899 * Implements RDTSC.
6900 */
6901IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
6902{
6903 /*
6904 * Check preconditions.
6905 */
6906 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
6907 return iemRaiseUndefinedOpcode(pVCpu);
6908
6909 if (pVCpu->iem.s.uCpl != 0)
6910 {
6911 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6912 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
6913 {
6914 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6915 return iemRaiseGeneralProtectionFault0(pVCpu);
6916 }
6917 }
6918
6919 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6920 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
6921 {
6922 Log(("rdtsc: Guest intercept -> VM-exit\n"));
6923 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
6924 }
6925
6926 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
6927 {
6928 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
6929 IEM_SVM_UPDATE_NRIP(pVCpu);
6930 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6931 }
6932
6933 /*
6934 * Do the job.
6935 */
6936 uint64_t uTicks = TMCpuTickGet(pVCpu);
6937#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
6938 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
6939#endif
6940 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
6941 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
6942 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); /* For IEMExecDecodedRdtsc. */
6943 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6944 return VINF_SUCCESS;
6945}
6946
6947
6948/**
6949 * Implements RDTSCP.
6950 */
6951IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
6952{
6953 /*
6954 * Check preconditions.
6955 */
6956 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
6957 return iemRaiseUndefinedOpcode(pVCpu);
6958
6959 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6960 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDTSCP))
6961 {
6962 Log(("rdtscp: Not enabled for VMX non-root mode -> #UD\n"));
6963 return iemRaiseUndefinedOpcode(pVCpu);
6964 }
6965
6966 if (pVCpu->iem.s.uCpl != 0)
6967 {
6968 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6969 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
6970 {
6971 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6972 return iemRaiseGeneralProtectionFault0(pVCpu);
6973 }
6974 }
6975
6976 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6977 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
6978 {
6979 Log(("rdtscp: Guest intercept -> VM-exit\n"));
6980 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSCP, cbInstr);
6981 }
6982 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
6983 {
6984 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
6985 IEM_SVM_UPDATE_NRIP(pVCpu);
6986 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6987 }
6988
6989 /*
6990 * Do the job.
6991 * Query the MSR first in case of trips to ring-3.
6992 */
6993 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
6994 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pVCpu->cpum.GstCtx.rcx);
6995 if (rcStrict == VINF_SUCCESS)
6996 {
6997 /* Low dword of the TSC_AUX msr only. */
6998 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
6999
7000 uint64_t uTicks = TMCpuTickGet(pVCpu);
7001#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7002 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7003#endif
7004 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7005 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7006 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX); /* For IEMExecDecodedRdtscp. */
7007 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7008 }
7009 return rcStrict;
7010}
7011
7012
7013/**
7014 * Implements RDPMC.
7015 */
7016IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
7017{
7018 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7019
7020 if ( pVCpu->iem.s.uCpl != 0
7021 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCE))
7022 return iemRaiseGeneralProtectionFault0(pVCpu);
7023
7024 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7025 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDPMC_EXIT))
7026 {
7027 Log(("rdpmc: Guest intercept -> VM-exit\n"));
7028 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDPMC, cbInstr);
7029 }
7030
7031 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
7032 {
7033 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
7034 IEM_SVM_UPDATE_NRIP(pVCpu);
7035 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7036 }
7037
7038 /** @todo Emulate performance counters, for now just return 0. */
7039 pVCpu->cpum.GstCtx.rax = 0;
7040 pVCpu->cpum.GstCtx.rdx = 0;
7041 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7042 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
7043 * ecx but see @bugref{3472}! */
7044
7045 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7046 return VINF_SUCCESS;
7047}
7048
7049
7050/**
7051 * Implements RDMSR.
7052 */
7053IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
7054{
7055 /*
7056 * Check preconditions.
7057 */
7058 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7059 return iemRaiseUndefinedOpcode(pVCpu);
7060 if (pVCpu->iem.s.uCpl != 0)
7061 return iemRaiseGeneralProtectionFault0(pVCpu);
7062
7063 /*
7064 * Check nested-guest intercepts.
7065 */
7066#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7067 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7068 {
7069 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_RDMSR, pVCpu->cpum.GstCtx.ecx))
7070 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDMSR, cbInstr);
7071 }
7072#endif
7073
7074#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7075 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7076 {
7077 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, false /* fWrite */);
7078 if (rcStrict == VINF_SVM_VMEXIT)
7079 return VINF_SUCCESS;
7080 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7081 {
7082 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
7083 return rcStrict;
7084 }
7085 }
7086#endif
7087
7088 /*
7089 * Do the job.
7090 */
7091 RTUINT64U uValue;
7092 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7093 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7094
7095 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, &uValue.u);
7096 if (rcStrict == VINF_SUCCESS)
7097 {
7098 pVCpu->cpum.GstCtx.rax = uValue.s.Lo;
7099 pVCpu->cpum.GstCtx.rdx = uValue.s.Hi;
7100 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7101
7102 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7103 return VINF_SUCCESS;
7104 }
7105
7106#ifndef IN_RING3
7107 /* Deferred to ring-3. */
7108 if (rcStrict == VINF_CPUM_R3_MSR_READ)
7109 {
7110 Log(("IEM: rdmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx));
7111 return rcStrict;
7112 }
7113#endif
7114
7115 /* Often an unimplemented MSR or MSR bit, so worth logging. */
7116 if (pVCpu->iem.s.cLogRelRdMsr < 32)
7117 {
7118 pVCpu->iem.s.cLogRelRdMsr++;
7119 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7120 }
7121 else
7122 Log(( "IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7123 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7124 return iemRaiseGeneralProtectionFault0(pVCpu);
7125}
7126
7127
7128/**
7129 * Implements WRMSR.
7130 */
7131IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
7132{
7133 /*
7134 * Check preconditions.
7135 */
7136 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7137 return iemRaiseUndefinedOpcode(pVCpu);
7138 if (pVCpu->iem.s.uCpl != 0)
7139 return iemRaiseGeneralProtectionFault0(pVCpu);
7140
7141 RTUINT64U uValue;
7142 uValue.s.Lo = pVCpu->cpum.GstCtx.eax;
7143 uValue.s.Hi = pVCpu->cpum.GstCtx.edx;
7144
7145 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7146
7147 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7148 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7149
7150 /*
7151 * Check nested-guest intercepts.
7152 */
7153#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7154 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7155 {
7156 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_WRMSR, idMsr))
7157 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WRMSR, cbInstr);
7158 }
7159#endif
7160
7161#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7162 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7163 {
7164 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, idMsr, true /* fWrite */);
7165 if (rcStrict == VINF_SVM_VMEXIT)
7166 return VINF_SUCCESS;
7167 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7168 {
7169 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", idMsr, VBOXSTRICTRC_VAL(rcStrict)));
7170 return rcStrict;
7171 }
7172 }
7173#endif
7174
7175 /*
7176 * Do the job.
7177 */
7178 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, idMsr, uValue.u);
7179 if (rcStrict == VINF_SUCCESS)
7180 {
7181 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7182 return VINF_SUCCESS;
7183 }
7184
7185#ifndef IN_RING3
7186 /* Deferred to ring-3. */
7187 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
7188 {
7189 Log(("IEM: wrmsr(%#x) -> ring-3\n", idMsr));
7190 return rcStrict;
7191 }
7192#endif
7193
7194 /* Often an unimplemented MSR or MSR bit, so worth logging. */
7195 if (pVCpu->iem.s.cLogRelWrMsr < 32)
7196 {
7197 pVCpu->iem.s.cLogRelWrMsr++;
7198 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7199 }
7200 else
7201 Log(( "IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7202 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7203 return iemRaiseGeneralProtectionFault0(pVCpu);
7204}
7205
7206
7207/**
7208 * Implements 'IN eAX, port'.
7209 *
7210 * @param u16Port The source port.
7211 * @param fImm Whether the port was specified through an immediate operand
7212 * or the implicit DX register.
7213 * @param cbReg The register size.
7214 */
7215IEM_CIMPL_DEF_3(iemCImpl_in, uint16_t, u16Port, bool, fImm, uint8_t, cbReg)
7216{
7217 /*
7218 * CPL check
7219 */
7220 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7221 if (rcStrict != VINF_SUCCESS)
7222 return rcStrict;
7223
7224 /*
7225 * Check VMX nested-guest IO intercept.
7226 */
7227#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7228 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7229 {
7230 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_IN, u16Port, fImm, cbReg, cbInstr);
7231 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7232 return rcStrict;
7233 }
7234#else
7235 RT_NOREF(fImm);
7236#endif
7237
7238 /*
7239 * Check SVM nested-guest IO intercept.
7240 */
7241#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7242 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7243 {
7244 uint8_t cAddrSizeBits;
7245 switch (pVCpu->iem.s.enmEffAddrMode)
7246 {
7247 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7248 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7249 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7250 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7251 }
7252 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7253 false /* fRep */, false /* fStrIo */, cbInstr);
7254 if (rcStrict == VINF_SVM_VMEXIT)
7255 return VINF_SUCCESS;
7256 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7257 {
7258 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7259 VBOXSTRICTRC_VAL(rcStrict)));
7260 return rcStrict;
7261 }
7262 }
7263#endif
7264
7265 /*
7266 * Perform the I/O.
7267 */
7268 uint32_t u32Value = 0;
7269 rcStrict = IOMIOPortRead(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, &u32Value, cbReg);
7270 if (IOM_SUCCESS(rcStrict))
7271 {
7272 switch (cbReg)
7273 {
7274 case 1: pVCpu->cpum.GstCtx.al = (uint8_t)u32Value; break;
7275 case 2: pVCpu->cpum.GstCtx.ax = (uint16_t)u32Value; break;
7276 case 4: pVCpu->cpum.GstCtx.rax = u32Value; break;
7277 default: AssertFailedReturn(VERR_IEM_IPE_3);
7278 }
7279 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7280 pVCpu->iem.s.cPotentialExits++;
7281 if (rcStrict != VINF_SUCCESS)
7282 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7283 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
7284
7285 /*
7286 * Check for I/O breakpoints.
7287 */
7288 uint32_t const uDr7 = pVCpu->cpum.GstCtx.dr[7];
7289 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
7290 && X86_DR7_ANY_RW_IO(uDr7)
7291 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7292 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
7293 {
7294 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7295 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, IEM_GET_CTX(pVCpu), u16Port, cbReg);
7296 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7297 rcStrict = iemRaiseDebugException(pVCpu);
7298 }
7299 }
7300
7301 return rcStrict;
7302}
7303
7304
7305/**
7306 * Implements 'IN eAX, DX'.
7307 *
7308 * @param cbReg The register size.
7309 */
7310IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
7311{
7312 return IEM_CIMPL_CALL_3(iemCImpl_in, pVCpu->cpum.GstCtx.dx, false /* fImm */, cbReg);
7313}
7314
7315
7316/**
7317 * Implements 'OUT port, eAX'.
7318 *
7319 * @param u16Port The destination port.
7320 * @param fImm Whether the port was specified through an immediate operand
7321 * or the implicit DX register.
7322 * @param cbReg The register size.
7323 */
7324IEM_CIMPL_DEF_3(iemCImpl_out, uint16_t, u16Port, bool, fImm, uint8_t, cbReg)
7325{
7326 /*
7327 * CPL check
7328 */
7329 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7330 if (rcStrict != VINF_SUCCESS)
7331 return rcStrict;
7332
7333 /*
7334 * Check VMX nested-guest I/O intercept.
7335 */
7336#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7337 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7338 {
7339 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_OUT, u16Port, fImm, cbReg, cbInstr);
7340 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7341 return rcStrict;
7342 }
7343#else
7344 RT_NOREF(fImm);
7345#endif
7346
7347 /*
7348 * Check SVM nested-guest I/O intercept.
7349 */
7350#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7351 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7352 {
7353 uint8_t cAddrSizeBits;
7354 switch (pVCpu->iem.s.enmEffAddrMode)
7355 {
7356 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7357 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7358 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7359 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7360 }
7361 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7362 false /* fRep */, false /* fStrIo */, cbInstr);
7363 if (rcStrict == VINF_SVM_VMEXIT)
7364 return VINF_SUCCESS;
7365 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7366 {
7367 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7368 VBOXSTRICTRC_VAL(rcStrict)));
7369 return rcStrict;
7370 }
7371 }
7372#endif
7373
7374 /*
7375 * Perform the I/O.
7376 */
7377 uint32_t u32Value;
7378 switch (cbReg)
7379 {
7380 case 1: u32Value = pVCpu->cpum.GstCtx.al; break;
7381 case 2: u32Value = pVCpu->cpum.GstCtx.ax; break;
7382 case 4: u32Value = pVCpu->cpum.GstCtx.eax; break;
7383 default: AssertFailedReturn(VERR_IEM_IPE_4);
7384 }
7385 rcStrict = IOMIOPortWrite(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, u32Value, cbReg);
7386 if (IOM_SUCCESS(rcStrict))
7387 {
7388 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7389 pVCpu->iem.s.cPotentialExits++;
7390 if (rcStrict != VINF_SUCCESS)
7391 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7392 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
7393
7394 /*
7395 * Check for I/O breakpoints.
7396 */
7397 uint32_t const uDr7 = pVCpu->cpum.GstCtx.dr[7];
7398 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
7399 && X86_DR7_ANY_RW_IO(uDr7)
7400 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7401 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
7402 {
7403 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7404 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, IEM_GET_CTX(pVCpu), u16Port, cbReg);
7405 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7406 rcStrict = iemRaiseDebugException(pVCpu);
7407 }
7408 }
7409 return rcStrict;
7410}
7411
7412
7413/**
7414 * Implements 'OUT DX, eAX'.
7415 *
7416 * @param cbReg The register size.
7417 */
7418IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
7419{
7420 return IEM_CIMPL_CALL_3(iemCImpl_out, pVCpu->cpum.GstCtx.dx, false /* fImm */, cbReg);
7421}
7422
7423
7424/**
7425 * Implements 'CLI'.
7426 */
7427IEM_CIMPL_DEF_0(iemCImpl_cli)
7428{
7429 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7430 uint32_t const fEflOld = fEfl;
7431
7432 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
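    /* Summary of the cases below: in protected mode CPL <= IOPL clears IF, and
       CPL 3 with CR4.PVI clears only VIF; in V8086 mode IOPL 3 clears IF, and
       IOPL < 3 with CR4.VME clears VIF; anything else raises #GP(0). Real mode
       always clears IF. */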
7433 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7434 {
7435 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7436 if (!(fEfl & X86_EFL_VM))
7437 {
7438 if (pVCpu->iem.s.uCpl <= uIopl)
7439 fEfl &= ~X86_EFL_IF;
7440 else if ( pVCpu->iem.s.uCpl == 3
7441 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI) )
7442 fEfl &= ~X86_EFL_VIF;
7443 else
7444 return iemRaiseGeneralProtectionFault0(pVCpu);
7445 }
7446 /* V8086 */
7447 else if (uIopl == 3)
7448 fEfl &= ~X86_EFL_IF;
7449 else if ( uIopl < 3
7450 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
7451 fEfl &= ~X86_EFL_VIF;
7452 else
7453 return iemRaiseGeneralProtectionFault0(pVCpu);
7454 }
7455 /* real mode */
7456 else
7457 fEfl &= ~X86_EFL_IF;
7458
7459 /* Commit. */
7460 IEMMISC_SET_EFL(pVCpu, fEfl);
7461 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7462 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
7463 return VINF_SUCCESS;
7464}
7465
7466
7467/**
7468 * Implements 'STI'.
7469 */
7470IEM_CIMPL_DEF_0(iemCImpl_sti)
7471{
7472 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7473 uint32_t const fEflOld = fEfl;
7474
7475 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7476 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7477 {
7478 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7479 if (!(fEfl & X86_EFL_VM))
7480 {
7481 if (pVCpu->iem.s.uCpl <= uIopl)
7482 fEfl |= X86_EFL_IF;
7483 else if ( pVCpu->iem.s.uCpl == 3
7484 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI)
7485 && !(fEfl & X86_EFL_VIP) )
7486 fEfl |= X86_EFL_VIF;
7487 else
7488 return iemRaiseGeneralProtectionFault0(pVCpu);
7489 }
7490 /* V8086 */
7491 else if (uIopl == 3)
7492 fEfl |= X86_EFL_IF;
7493 else if ( uIopl < 3
7494 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME)
7495 && !(fEfl & X86_EFL_VIP) )
7496 fEfl |= X86_EFL_VIF;
7497 else
7498 return iemRaiseGeneralProtectionFault0(pVCpu);
7499 }
7500 /* real mode */
7501 else
7502 fEfl |= X86_EFL_IF;
7503
7504 /* Commit. */
7505 IEMMISC_SET_EFL(pVCpu, fEfl);
7506 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
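    /* When STI transitions IF from 0 to 1, interrupts stay inhibited until after
       the next instruction completes (the STI interrupt shadow). */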
7507 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF))
7508 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
7509 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
7510 return VINF_SUCCESS;
7511}
7512
7513
7514/**
7515 * Implements 'HLT'.
7516 */
7517IEM_CIMPL_DEF_0(iemCImpl_hlt)
7518{
7519 if (pVCpu->iem.s.uCpl != 0)
7520 return iemRaiseGeneralProtectionFault0(pVCpu);
7521
7522 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7523 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
7524 {
7525 Log2(("hlt: Guest intercept -> VM-exit\n"));
7526 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
7527 }
7528
7529 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
7530 {
7531 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
7532 IEM_SVM_UPDATE_NRIP(pVCpu);
7533 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7534 }
7535
7536 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7537 return VINF_EM_HALT;
7538}
7539
7540
7541/**
7542 * Implements 'MONITOR'.
7543 */
7544IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
7545{
7546 /*
7547 * Permission checks.
7548 */
7549 if (pVCpu->iem.s.uCpl != 0)
7550 {
7551 Log2(("monitor: CPL != 0\n"));
7552 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
7553 }
7554 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7555 {
7556 Log2(("monitor: Not in CPUID\n"));
7557 return iemRaiseUndefinedOpcode(pVCpu);
7558 }
7559
7560 /*
7561 * Check VMX guest-intercept.
7562 * This should be considered a fault-like VM-exit.
7563 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
7564 */
7565 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7566 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MONITOR_EXIT))
7567 {
7568 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7569 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_MONITOR, cbInstr);
7570 }
7571
7572 /*
7573 * Gather the operands and validate them.
7574 */
7575 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
7576 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
7577 uint32_t uEdx = pVCpu->cpum.GstCtx.edx;
7578/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
7579 * \#GP first. */
7580 if (uEcx != 0)
7581 {
7582 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
7583 return iemRaiseGeneralProtectionFault0(pVCpu);
7584 }
7585
7586 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
7587 if (rcStrict != VINF_SUCCESS)
7588 return rcStrict;
7589
7590 RTGCPHYS GCPhysMem;
7591 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7592 if (rcStrict != VINF_SUCCESS)
7593 return rcStrict;
7594
7595#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7596 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7597 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
7598 {
7599 /*
7600 * MONITOR does not access the memory, just monitors the address. However,
7601 * if the address falls in the APIC-access page, the address monitored must
7602 * instead be the corresponding address in the virtual-APIC page.
7603 *
7604 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
7605 */
7606 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
7607 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
7608 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
7609 return rcStrict;
7610 }
7611#endif
7612
7613 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
7614 {
7615 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7616 IEM_SVM_UPDATE_NRIP(pVCpu);
7617 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7618 }
7619
7620 /*
7621 * Call EM to prepare the monitor/wait.
7622 */
7623 rcStrict = EMMonitorWaitPrepare(pVCpu, pVCpu->cpum.GstCtx.rax, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.rdx, GCPhysMem);
7624 Assert(rcStrict == VINF_SUCCESS);
7625
7626 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7627 return rcStrict;
7628}
7629
7630
7631/**
7632 * Implements 'MWAIT'.
7633 */
7634IEM_CIMPL_DEF_0(iemCImpl_mwait)
7635{
7636 /*
7637 * Permission checks.
7638 */
7639 if (pVCpu->iem.s.uCpl != 0)
7640 {
7641 Log2(("mwait: CPL != 0\n"));
7642 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
7643 * EFLAGS.VM then.) */
7644 return iemRaiseUndefinedOpcode(pVCpu);
7645 }
7646 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7647 {
7648 Log2(("mwait: Not in CPUID\n"));
7649 return iemRaiseUndefinedOpcode(pVCpu);
7650 }
7651
7652 /* Check VMX nested-guest intercept. */
7653 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7654 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MWAIT_EXIT))
7655 IEM_VMX_VMEXIT_MWAIT_RET(pVCpu, EMMonitorIsArmed(pVCpu), cbInstr);
7656
7657 /*
7658 * Gather the operands and validate them.
7659 */
7660 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
7661 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
7662 if (uEcx != 0)
7663 {
7664 /* Only supported extension is break on IRQ when IF=0. */
7665 if (uEcx > 1)
7666 {
7667 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
7668 return iemRaiseGeneralProtectionFault0(pVCpu);
7669 }
7670 uint32_t fMWaitFeatures = 0;
7671 uint32_t uIgnore = 0;
7672 CPUMGetGuestCpuId(pVCpu, 5, 0, -1 /*f64BitMode*/, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
7673 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7674 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7675 {
7676 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
7677 return iemRaiseGeneralProtectionFault0(pVCpu);
7678 }
7679
7680#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7681 /*
7682 * If the interrupt-window exiting control is set or a virtual interrupt is pending
7683 * for delivery, and interrupts are disabled, the processor does not enter its
7684 * mwait state but instead passes control to the next instruction.
7685 *
7686 * See Intel spec. 25.3 "Changes to Instruction Behavior In VMX Non-root Operation".
7687 */
7688 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7689 && !pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
7690 {
7691 if ( IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INT_WINDOW_EXIT)
7692 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
7693 {
7694 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7695 return VINF_SUCCESS;
7696 }
7697 }
7698#endif
7699 }
7700
7701 /*
7702 * Check SVM nested-guest mwait intercepts.
7703 */
7704 if ( IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
7705 && EMMonitorIsArmed(pVCpu))
7706 {
7707 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
7708 IEM_SVM_UPDATE_NRIP(pVCpu);
7709 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7710 }
7711 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
7712 {
7713 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
7714 IEM_SVM_UPDATE_NRIP(pVCpu);
7715 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7716 }
7717
7718 /*
7719 * Call EM to prepare the monitor/wait.
7720 */
7721 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
7722
7723 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7724 return rcStrict;
7725}
7726
7727
7728/**
7729 * Implements 'SWAPGS'.
7730 */
7731IEM_CIMPL_DEF_0(iemCImpl_swapgs)
7732{
7733 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
7734
7735 /*
7736 * Permission checks.
7737 */
7738 if (pVCpu->iem.s.uCpl != 0)
7739 {
7740 Log2(("swapgs: CPL != 0\n"));
7741 return iemRaiseUndefinedOpcode(pVCpu);
7742 }
7743
7744 /*
7745 * Do the job.
7746 */
7747 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_GS);
7748 uint64_t uOtherGsBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
7749 pVCpu->cpum.GstCtx.msrKERNELGSBASE = pVCpu->cpum.GstCtx.gs.u64Base;
7750 pVCpu->cpum.GstCtx.gs.u64Base = uOtherGsBase;
7751
7752 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7753 return VINF_SUCCESS;
7754}
7755
7756
7757/**
7758 * Implements 'CPUID'.
7759 */
7760IEM_CIMPL_DEF_0(iemCImpl_cpuid)
7761{
7762 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7763 {
7764 Log2(("cpuid: Guest intercept -> VM-exit\n"));
7765 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
7766 }
7767
7768 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
7769 {
7770 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
7771 IEM_SVM_UPDATE_NRIP(pVCpu);
7772 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7773 }
7774
7775 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.cs.Attr.n.u1Long,
7776 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
7777 pVCpu->cpum.GstCtx.rax &= UINT32_C(0xffffffff);
7778 pVCpu->cpum.GstCtx.rbx &= UINT32_C(0xffffffff);
7779 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
7780 pVCpu->cpum.GstCtx.rdx &= UINT32_C(0xffffffff);
7781 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
7782
7783 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7784 pVCpu->iem.s.cPotentialExits++;
7785 return VINF_SUCCESS;
7786}
7787
7788
7789/**
7790 * Implements 'AAD'.
7791 *
7792 * @param bImm The immediate operand.
7793 */
7794IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
7795{
7796 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
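    /* AAD: AL = (AL + AH * imm8) & 0xff, AH = 0 (the 16-bit store below clears AH). */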
7797 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
7798 pVCpu->cpum.GstCtx.ax = al;
7799 iemHlpUpdateArithEFlagsU8(pVCpu, al,
7800 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
7801 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
7802
7803 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7804 return VINF_SUCCESS;
7805}
7806
7807
7808/**
7809 * Implements 'AAM'.
7810 *
7811 * @param bImm The immediate operand. Cannot be 0.
7812 */
7813IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
7814{
7815 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
7816
7817 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
7818 uint8_t const al = (uint8_t)ax % bImm;
7819 uint8_t const ah = (uint8_t)ax / bImm;
7820 pVCpu->cpum.GstCtx.ax = (ah << 8) + al;
7821 iemHlpUpdateArithEFlagsU8(pVCpu, al,
7822 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
7823 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
7824
7825 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7826 return VINF_SUCCESS;
7827}
7828
7829
7830/**
7831 * Implements 'DAA'.
7832 */
7833IEM_CIMPL_DEF_0(iemCImpl_daa)
7834{
7835 uint8_t const al = pVCpu->cpum.GstCtx.al;
7836 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
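    /* Packed BCD adjust after addition: fix the low nibble first (+6, sets AF),
       then the high nibble (+0x60, sets CF), both based on the original AL and flags. */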
7837
7838 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7839 || (al & 0xf) >= 10)
7840 {
7841 pVCpu->cpum.GstCtx.al = al + 6;
7842 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7843 }
7844 else
7845 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7846
7847 if (al >= 0x9a || fCarry)
7848 {
7849 pVCpu->cpum.GstCtx.al += 0x60;
7850 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7851 }
7852 else
7853 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7854
7855 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7856 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7857 return VINF_SUCCESS;
7858}
7859
7860
7861/**
7862 * Implements 'DAS'.
7863 */
7864IEM_CIMPL_DEF_0(iemCImpl_das)
7865{
7866 uint8_t const uInputAL = pVCpu->cpum.GstCtx.al;
7867 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
7868
7869 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7870 || (uInputAL & 0xf) >= 10)
7871 {
7872 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7873 if (uInputAL < 6)
7874 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7875 pVCpu->cpum.GstCtx.al = uInputAL - 6;
7876 }
7877 else
7878 {
7879 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7880 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7881 }
7882
7883 if (uInputAL >= 0x9a || fCarry)
7884 {
7885 pVCpu->cpum.GstCtx.al -= 0x60;
7886 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7887 }
7888
7889 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7890 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7891 return VINF_SUCCESS;
7892}
7893
7894
7895/**
7896 * Implements 'AAA'.
7897 */
7898IEM_CIMPL_DEF_0(iemCImpl_aaa)
7899{
7900 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
7901 {
7902 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7903 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
7904 {
7905 iemAImpl_add_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.u32);
7906 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7907 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7908 }
7909 else
7910 {
7911 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7912 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7913 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7914 }
7915 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
7916 }
7917 else
7918 {
7919 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7920 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
7921 {
7922 pVCpu->cpum.GstCtx.ax += UINT16_C(0x106);
7923 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7924 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7925 }
7926 else
7927 {
7928 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7929 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7930 }
7931 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
7932 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7933 }
7934
7935 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7936 return VINF_SUCCESS;
7937}
7938
7939
7940/**
7941 * Implements 'AAS'.
7942 */
7943IEM_CIMPL_DEF_0(iemCImpl_aas)
7944{
7945 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
7946 {
7947 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7948 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
7949 {
7950 iemAImpl_sub_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.u32);
7951 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7952 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7953 }
7954 else
7955 {
7956 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7957 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7958 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7959 }
7960 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
7961 }
7962 else
7963 {
7964 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7965 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
7966 {
7967 pVCpu->cpum.GstCtx.ax -= UINT16_C(0x106);
7968 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7969 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7970 }
7971 else
7972 {
7973 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7974 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7975 }
7976 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
7977 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7978 }
7979
7980 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7981 return VINF_SUCCESS;
7982}
7983
7984
7985/**
7986 * Implements the 16-bit version of 'BOUND'.
7987 *
7988 * @note We have separate 16-bit and 32-bit variants of this function due to
7989 * the decoder using unsigned parameters, whereas we want signed ones to
7990 * do the job. This is significant for a recompiler.
7991 */
7992IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
7993{
7994 /*
7995 * Check if the index is inside the bounds, otherwise raise #BR.
7996 */
7997 if ( idxArray >= idxLowerBound
7998 && idxArray <= idxUpperBound)
7999 {
8000 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8001 return VINF_SUCCESS;
8002 }
8003
8004 return iemRaiseBoundRangeExceeded(pVCpu);
8005}
8006
8007
8008/**
8009 * Implements the 32-bit version of 'BOUND'.
8010 */
8011IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
8012{
8013 /*
8014 * Check if the index is inside the bounds, otherwise raise #BR.
8015 */
8016 if ( idxArray >= idxLowerBound
8017 && idxArray <= idxUpperBound)
8018 {
8019 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8020 return VINF_SUCCESS;
8021 }
8022
8023 return iemRaiseBoundRangeExceeded(pVCpu);
8024}
8025
8026
8027
8028/*
8029 * Instantiate the various string operation combinations.
8030 */
8031#define OP_SIZE 8
8032#define ADDR_SIZE 16
8033#include "IEMAllCImplStrInstr.cpp.h"
8034#define OP_SIZE 8
8035#define ADDR_SIZE 32
8036#include "IEMAllCImplStrInstr.cpp.h"
8037#define OP_SIZE 8
8038#define ADDR_SIZE 64
8039#include "IEMAllCImplStrInstr.cpp.h"
8040
8041#define OP_SIZE 16
8042#define ADDR_SIZE 16
8043#include "IEMAllCImplStrInstr.cpp.h"
8044#define OP_SIZE 16
8045#define ADDR_SIZE 32
8046#include "IEMAllCImplStrInstr.cpp.h"
8047#define OP_SIZE 16
8048#define ADDR_SIZE 64
8049#include "IEMAllCImplStrInstr.cpp.h"
8050
8051#define OP_SIZE 32
8052#define ADDR_SIZE 16
8053#include "IEMAllCImplStrInstr.cpp.h"
8054#define OP_SIZE 32
8055#define ADDR_SIZE 32
8056#include "IEMAllCImplStrInstr.cpp.h"
8057#define OP_SIZE 32
8058#define ADDR_SIZE 64
8059#include "IEMAllCImplStrInstr.cpp.h"
8060
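/* Note: there is no OP_SIZE 64 / ADDR_SIZE 16 combination; 64-bit operand size
   only exists in long mode, where 16-bit addressing cannot be encoded. */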
8061#define OP_SIZE 64
8062#define ADDR_SIZE 32
8063#include "IEMAllCImplStrInstr.cpp.h"
8064#define OP_SIZE 64
8065#define ADDR_SIZE 64
8066#include "IEMAllCImplStrInstr.cpp.h"
8067
8068
8069/**
8070 * Implements 'XGETBV'.
8071 */
8072IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
8073{
8074 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
8075 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8076 {
8077 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8078 switch (uEcx)
8079 {
8080 case 0:
8081 break;
8082
8083 case 1: /** @todo Implement XCR1 support. */
8084 default:
8085 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
8086 return iemRaiseGeneralProtectionFault0(pVCpu);
8087
8088 }
8089 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8090 pVCpu->cpum.GstCtx.rax = RT_LO_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8091 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8092
8093 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8094 return VINF_SUCCESS;
8095 }
8096 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
8097 return iemRaiseUndefinedOpcode(pVCpu);
8098}
8099
8100
8101/**
8102 * Implements 'XSETBV'.
8103 */
8104IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
8105{
8106 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8107 {
8108 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
8109 {
8110 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
8111 IEM_SVM_UPDATE_NRIP(pVCpu);
8112 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8113 }
8114
8115 if (pVCpu->iem.s.uCpl == 0)
8116 {
8117 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8118
8119 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8120 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_XSETBV, cbInstr);
8121
8122 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8123 uint64_t uNewValue = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx);
8124 switch (uEcx)
8125 {
8126 case 0:
8127 {
8128 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
8129 if (rc == VINF_SUCCESS)
8130 break;
8131 Assert(rc == VERR_CPUM_RAISE_GP_0);
8132 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8133 return iemRaiseGeneralProtectionFault0(pVCpu);
8134 }
8135
8136 case 1: /** @todo Implement XCR1 support. */
8137 default:
8138 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8139 return iemRaiseGeneralProtectionFault0(pVCpu);
8140
8141 }
8142
8143 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8144 return VINF_SUCCESS;
8145 }
8146
8147 Log(("xsetbv cpl=%u -> GP(0)\n", pVCpu->iem.s.uCpl));
8148 return iemRaiseGeneralProtectionFault0(pVCpu);
8149 }
8150 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
8151 return iemRaiseUndefinedOpcode(pVCpu);
8152}
8153
8154#ifndef RT_ARCH_ARM64
8155# ifdef IN_RING3
8156
8157/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
8158struct IEMCIMPLCX16ARGS
8159{
8160 PRTUINT128U pu128Dst;
8161 PRTUINT128U pu128RaxRdx;
8162 PRTUINT128U pu128RbxRcx;
8163 uint32_t *pEFlags;
8164# ifdef VBOX_STRICT
8165 uint32_t cCalls;
8166# endif
8167};
8168
8169/**
8170 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
8171 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
8172 */
8173static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPUCC pVCpu, void *pvUser)
8174{
8175 RT_NOREF(pVM, pVCpu);
8176 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
8177# ifdef VBOX_STRICT
8178 Assert(pArgs->cCalls == 0);
8179 pArgs->cCalls++;
8180# endif
8181
8182 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
8183 return VINF_SUCCESS;
8184}
8185
8186# endif /* IN_RING3 */
8187
8188/**
8189 * Implements 'CMPXCHG16B' fallback using rendezvous.
8190 */
8191IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
8192 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
8193{
8194# ifdef IN_RING3
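    /* Bring all EMTs to a rendezvous (only one executes the callback) so the
       non-atomic fallback cannot race other virtual CPUs while it emulates the
       128-bit compare-exchange. */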
8195 struct IEMCIMPLCX16ARGS Args;
8196 Args.pu128Dst = pu128Dst;
8197 Args.pu128RaxRdx = pu128RaxRdx;
8198 Args.pu128RbxRcx = pu128RbxRcx;
8199 Args.pEFlags = pEFlags;
8200# ifdef VBOX_STRICT
8201 Args.cCalls = 0;
8202# endif
8203 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
8204 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
8205 Assert(Args.cCalls == 1);
8206 if (rcStrict == VINF_SUCCESS)
8207 {
8208 /* Duplicated tail code. */
8209 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
8210 if (rcStrict == VINF_SUCCESS)
8211 {
8212 pVCpu->cpum.GstCtx.eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
8213 if (!(*pEFlags & X86_EFL_ZF))
8214 {
8215 pVCpu->cpum.GstCtx.rax = pu128RaxRdx->s.Lo;
8216 pVCpu->cpum.GstCtx.rdx = pu128RaxRdx->s.Hi;
8217 }
8218 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8219 }
8220 }
8221 return rcStrict;
8222# else
8223 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
8224 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
8225# endif
8226}
8227
8228#endif /* RT_ARCH_ARM64 */
8229
8230/**
8231 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
8232 *
8233 * This is implemented in C because it triggers a load-like behaviour without
8234 * actually reading anything. Since that's not so common, it's implemented
8235 * here.
8236 *
8237 * @param iEffSeg The effective segment.
8238 * @param GCPtrEff The address of the image.
8239 */
8240IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8241{
8242 /*
8243 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
8244 */
8245 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
8246 if (rcStrict == VINF_SUCCESS)
8247 {
8248 RTGCPHYS GCPhysMem;
8249 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
8250 if (rcStrict == VINF_SUCCESS)
8251 {
8252#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8253 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8254 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
8255 {
8256 /*
8257 * CLFLUSH/CLFLUSHOPT does not access the memory, but flushes the cache-line
8258 * that contains the address. However, if the address falls in the APIC-access
8259 * page, the address flushed must instead be the corresponding address in the
8260 * virtual-APIC page.
8261 *
8262 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
8263 */
8264 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
8265 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
8266 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
8267 return rcStrict;
8268 }
8269#endif
8270 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8271 return VINF_SUCCESS;
8272 }
8273 }
8274
8275 return rcStrict;
8276}
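
/*
 * Illustrative sketch (not part of the build): CLFLUSH/CLFLUSHOPT act on the
 * whole cache line containing the linear address, so the interesting address
 * arithmetic is just a round-down to the line size.  The 64-byte line size
 * below is an assumption for the sketch; real code would derive it from
 * CPUID leaf 1 (EBX bits 15:8, in units of 8 bytes).
 */
#if 0
# include <stdint.h>
static inline uint64_t clflushLineBase(uint64_t uAddr)
{
    uint64_t const cbLine = 64;         /* assumed cache line size */
    return uAddr & ~(cbLine - 1);       /* start of the line being flushed */
}
#endif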
8277
8278
8279/**
8280 * Implements 'FINIT' and 'FNINIT'.
8281 *
8282 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
8283 * not.
8284 */
8285IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
8286{
8287 /*
8288 * Exceptions.
8289 */
8290 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8291 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
8292 return iemRaiseDeviceNotAvailable(pVCpu);
8293
8294 iemFpuActualizeStateForChange(pVCpu);
8295 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_X87);
8296
8297 /* FINIT: Raise #MF on pending exception(s): */
8298 if (fCheckXcpts && (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))
8299 return iemRaiseMathFault(pVCpu);
8300
8301 /*
8302 * Reset the state.
8303 */
8304 PX86XSAVEAREA pXState = &pVCpu->cpum.GstCtx.XState;
8305 pXState->x87.FCW = 0x37f;
8306 pXState->x87.FSW = 0;
8307 pXState->x87.FTW = 0x00; /* 0 - empty. */
8308 /** @todo Intel says the instruction and data pointers are not cleared on
8309 * 387, presume that the 8087 and 287 don't do so either. */
8310 /** @todo test this stuff. */
8311 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
8312 {
8313 pXState->x87.FPUDP = 0;
8314 pXState->x87.DS = 0; //??
8315 pXState->x87.Rsrvd2 = 0;
8316 pXState->x87.FPUIP = 0;
8317 pXState->x87.CS = 0; //??
8318 pXState->x87.Rsrvd1 = 0;
8319 }
8320 pXState->x87.FOP = 0;
8321
8322 iemHlpUsedFpu(pVCpu);
8323 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8324 return VINF_SUCCESS;
8325}
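
/*
 * Illustrative sketch (not part of the build): the FNINIT reset values applied
 * above, collected into a hypothetical minimal environment struct.  FCW=0x037F
 * masks all exceptions, FSW=0 clears TOP and the status flags, and FTW=0 marks
 * every register empty in the abridged (FXSAVE-style) tag format kept by IEM;
 * the architectural full tag word would read 0xFFFF.
 */
#if 0
# include <stdint.h>
typedef struct FpuEnvSketch
{
    uint16_t FCW, FSW, FTW, FOP;
    uint32_t FPUIP, FPUDP;
    uint16_t CS, DS;
} FpuEnvSketch;
static const FpuEnvSketch g_FninitState = { 0x037f, 0, 0x00, 0, 0, 0, 0, 0 };
#endif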
8326
8327
8328/**
8329 * Implements 'FXSAVE'.
8330 *
8331 * @param iEffSeg The effective segment.
8332 * @param GCPtrEff The address of the image.
8333 * @param enmEffOpSize The operand size (only REX.W really matters).
8334 */
8335IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8336{
8337 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8338
8339 /*
8340 * Raise exceptions.
8341 */
8342 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
8343 return iemRaiseUndefinedOpcode(pVCpu);
8344 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8345 return iemRaiseDeviceNotAvailable(pVCpu);
8346
8347 /*
8348 * Access the memory.
8349 */
8350 void *pvMem512;
8351 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8352 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8353 if (rcStrict != VINF_SUCCESS)
8354 return rcStrict;
8355 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8356 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8357
8358 /*
8359 * Store the registers.
8360 */
8361 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
8362 * specific whether MXCSR and XMM0-XMM7 are saved. */
8363
8364 /* common for all formats */
8365 pDst->FCW = pSrc->FCW;
8366 pDst->FSW = pSrc->FSW;
8367 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8368 pDst->FOP = pSrc->FOP;
8369 pDst->MXCSR = pSrc->MXCSR;
8370 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8371 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8372 {
8373 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8374 * them for now... */
8375 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8376 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8377 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8378 pDst->aRegs[i].au32[3] = 0;
8379 }
8380
8381 /* FPU IP, CS, DP and DS. */
8382 pDst->FPUIP = pSrc->FPUIP;
8383 pDst->CS = pSrc->CS;
8384 pDst->FPUDP = pSrc->FPUDP;
8385 pDst->DS = pSrc->DS;
8386 if (enmEffOpSize == IEMMODE_64BIT)
8387 {
8388 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8389 pDst->Rsrvd1 = pSrc->Rsrvd1;
8390 pDst->Rsrvd2 = pSrc->Rsrvd2;
8391 }
8392 else
8393 {
8394 pDst->Rsrvd1 = 0;
8395 pDst->Rsrvd2 = 0;
8396 }
8397
8398 /* XMM registers. */
8399 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8400 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
8401 || pVCpu->iem.s.uCpl != 0)
8402 {
8403 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8404 for (uint32_t i = 0; i < cXmmRegs; i++)
8405 pDst->aXMM[i] = pSrc->aXMM[i];
8406 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8407 * right? */
8408 }
8409
8410 /*
8411 * Commit the memory.
8412 */
8413 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8414 if (rcStrict != VINF_SUCCESS)
8415 return rcStrict;
8416
8417 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8418 return VINF_SUCCESS;
8419}
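
/*
 * Illustrative sketch (not part of the build): a trimmed, hypothetical mirror
 * of the fixed 512-byte FXSAVE image written above, with the offsets pinned by
 * static assertions so the layout documents itself.  Only the fields touched
 * by the code are spelled out; the tail is left as a reserved blob.
 */
#if 0
# include <stdint.h>
# include <stddef.h>
typedef struct FxSaveImageSketch
{
    uint16_t FCW, FSW;                    /* 0x000 */
    uint8_t  FTW, bRsvd;                  /* 0x004: abridged tag word */
    uint16_t FOP;                         /* 0x006 */
    uint32_t FPUIP; uint16_t CS, Rsrvd1;  /* 0x008 */
    uint32_t FPUDP; uint16_t DS, Rsrvd2;  /* 0x010 */
    uint32_t MXCSR, MXCSR_MASK;           /* 0x018 */
    uint8_t  aRegs[8][16];                /* 0x020: ST0..ST7, 80-bit values in 16-byte slots */
    uint8_t  aXMM[16][16];                /* 0x0a0: XMM0..XMM15 (8..15 only saved in 64-bit mode) */
    uint8_t  abRsvd[96];                  /* 0x1a0: reserved / available */
} FxSaveImageSketch;
static_assert(offsetof(FxSaveImageSketch, MXCSR) == 0x18, "layout");
static_assert(offsetof(FxSaveImageSketch, aRegs) == 0x20, "layout");
static_assert(offsetof(FxSaveImageSketch, aXMM)  == 0xa0, "layout");
static_assert(sizeof(FxSaveImageSketch) == 512, "FXSAVE image is 512 bytes");
#endif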
8420
8421
8422/**
8423 * Implements 'FXRSTOR'.
8424 *
8425 * @param iEffSeg The effective segment register for @a GCPtrEff.
8426 * @param GCPtrEff The address of the image.
8427 * @param enmEffOpSize The operand size (only REX.W really matters).
8428 */
8429IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8430{
8431 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8432
8433 /*
8434 * Raise exceptions.
8435 */
8436 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
8437 return iemRaiseUndefinedOpcode(pVCpu);
8438 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8439 return iemRaiseDeviceNotAvailable(pVCpu);
8440
8441 /*
8442 * Access the memory.
8443 */
8444 void *pvMem512;
8445 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8446 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8447 if (rcStrict != VINF_SUCCESS)
8448 return rcStrict;
8449 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8450 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8451
8452 /*
8453 * Check the state for stuff which will #GP(0).
8454 */
8455 uint32_t const fMXCSR = pSrc->MXCSR;
8456 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8457 if (fMXCSR & ~fMXCSR_MASK)
8458 {
8459 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
8460 return iemRaiseGeneralProtectionFault0(pVCpu);
8461 }
8462
8463 /*
8464 * Load the registers.
8465 */
8466 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
8467 * specific whether MXCSR and XMM0-XMM7 are restored. */
8468
8469 /* common for all formats */
8470 pDst->FCW = pSrc->FCW;
8471 pDst->FSW = pSrc->FSW;
8472 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8473 pDst->FOP = pSrc->FOP;
8474 pDst->MXCSR = fMXCSR;
8475 /* (MXCSR_MASK is read-only) */
8476 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8477 {
8478 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8479 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8480 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8481 pDst->aRegs[i].au32[3] = 0;
8482 }
8483
8484 /* FPU IP, CS, DP and DS. */
8485 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8486 {
8487 pDst->FPUIP = pSrc->FPUIP;
8488 pDst->CS = pSrc->CS;
8489 pDst->Rsrvd1 = pSrc->Rsrvd1;
8490 pDst->FPUDP = pSrc->FPUDP;
8491 pDst->DS = pSrc->DS;
8492 pDst->Rsrvd2 = pSrc->Rsrvd2;
8493 }
8494 else
8495 {
8496 pDst->FPUIP = pSrc->FPUIP;
8497 pDst->CS = pSrc->CS;
8498 pDst->Rsrvd1 = 0;
8499 pDst->FPUDP = pSrc->FPUDP;
8500 pDst->DS = pSrc->DS;
8501 pDst->Rsrvd2 = 0;
8502 }
8503
8504 /* XMM registers. */
8505 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8506 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
8507 || pVCpu->iem.s.uCpl != 0)
8508 {
8509 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8510 for (uint32_t i = 0; i < cXmmRegs; i++)
8511 pDst->aXMM[i] = pSrc->aXMM[i];
8512 }
8513
8514 if (pDst->FSW & X86_FSW_ES)
8515 Log11(("fxrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
8516 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
8517
8518 /*
8519 * Commit the memory.
8520 */
8521 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
8522 if (rcStrict != VINF_SUCCESS)
8523 return rcStrict;
8524
8525 iemHlpUsedFpu(pVCpu);
8526 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8527 return VINF_SUCCESS;
8528}
8529
8530
8531/**
8532 * Implements 'XSAVE'.
8533 *
8534 * @param iEffSeg The effective segment.
8535 * @param GCPtrEff The address of the image.
8536 * @param enmEffOpSize The operand size (only REX.W really matters).
8537 */
8538IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8539{
8540 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8541
8542 /*
8543 * Raise exceptions.
8544 */
8545 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8546 return iemRaiseUndefinedOpcode(pVCpu);
8547 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8548 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8549 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS))
8550 {
8551 Log(("xsave: Not enabled for nested-guest execution -> #UD\n"));
8552 return iemRaiseUndefinedOpcode(pVCpu);
8553 }
8554 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8555 return iemRaiseDeviceNotAvailable(pVCpu);
8556
8557 /*
8558 * Calc the requested mask.
8559 */
8560 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
8561 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8562 uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8563
8564/** @todo figure out the exact protocol for the memory access. Currently we
8565 * just need this crap to work halfways to make it possible to test
8566 * AVX instructions. */
8567/** @todo figure out the XINUSE and XMODIFIED */
8568
8569 /*
8570 * Access the x87 memory state.
8571 */
8572 /* The x87+SSE state. */
8573 void *pvMem512;
8574 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8575 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8576 if (rcStrict != VINF_SUCCESS)
8577 return rcStrict;
8578 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8579 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8580
8581 /* The header. */
8582 PX86XSAVEHDR pHdr;
8583 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(*pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */);
8584 if (rcStrict != VINF_SUCCESS)
8585 return rcStrict;
8586
8587 /*
8588 * Store the X87 state.
8589 */
8590 if (fReqComponents & XSAVE_C_X87)
8591 {
8592 /* common for all formats */
8593 pDst->FCW = pSrc->FCW;
8594 pDst->FSW = pSrc->FSW;
8595 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8596 pDst->FOP = pSrc->FOP;
8597 pDst->FPUIP = pSrc->FPUIP;
8598 pDst->CS = pSrc->CS;
8599 pDst->FPUDP = pSrc->FPUDP;
8600 pDst->DS = pSrc->DS;
8601 if (enmEffOpSize == IEMMODE_64BIT)
8602 {
8603 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8604 pDst->Rsrvd1 = pSrc->Rsrvd1;
8605 pDst->Rsrvd2 = pSrc->Rsrvd2;
8606 }
8607 else
8608 {
8609 pDst->Rsrvd1 = 0;
8610 pDst->Rsrvd2 = 0;
8611 }
8612 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8613 {
8614 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8615 * them for now... */
8616 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8617 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8618 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8619 pDst->aRegs[i].au32[3] = 0;
8620 }
8621
8622 }
8623
8624 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
8625 {
8626 pDst->MXCSR = pSrc->MXCSR;
8627 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8628 }
8629
8630 if (fReqComponents & XSAVE_C_SSE)
8631 {
8632 /* XMM registers. */
8633 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8634 for (uint32_t i = 0; i < cXmmRegs; i++)
8635 pDst->aXMM[i] = pSrc->aXMM[i];
8636 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8637 * right? */
8638 }
8639
8640 /* Commit the x87 state bits. (probably wrong) */
8641 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8642 if (rcStrict != VINF_SUCCESS)
8643 return rcStrict;
8644
8645 /*
8646 * Store AVX state.
8647 */
8648 if (fReqComponents & XSAVE_C_YMM)
8649 {
8650 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
8651 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
8652 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
8653 PX86XSAVEYMMHI pCompDst;
8654 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
8655 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */);
8656 if (rcStrict != VINF_SUCCESS)
8657 return rcStrict;
8658
8659 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8660 for (uint32_t i = 0; i < cXmmRegs; i++)
8661 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
8662
8663 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8664 if (rcStrict != VINF_SUCCESS)
8665 return rcStrict;
8666 }
8667
8668 /*
8669 * Update the header.
8670 */
8671 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
8672 | (fReqComponents & fXInUse);
8673
8674 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
8675 if (rcStrict != VINF_SUCCESS)
8676 return rcStrict;
8677
8678 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8679 return VINF_SUCCESS;
8680}
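
/*
 * Illustrative sketch (not part of the build): the XSTATE_BV update rule used
 * on the header above.  Bits outside the requested-feature bitmap
 * (RFBM = EDX:EAX & XCR0) keep their previous value, bits inside it are taken
 * from XINUSE; the code currently approximates XINUSE with XCR0 itself.
 */
#if 0
# include <stdint.h>
static inline uint64_t xsaveUpdateXstateBv(uint64_t bmOld, uint64_t fRfbm, uint64_t fXInUse)
{
    return (bmOld & ~fRfbm) | (fRfbm & fXInUse);
}
#endif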
8681
8682
8683/**
8684 * Implements 'XRSTOR'.
8685 *
8686 * @param iEffSeg The effective segment.
8687 * @param GCPtrEff The address of the image.
8688 * @param enmEffOpSize The operand size (only REX.W really matters).
8689 */
8690IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8691{
8692 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8693
8694 /*
8695 * Raise exceptions.
8696 */
8697 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8698 return iemRaiseUndefinedOpcode(pVCpu);
8699 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8700 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8701 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS))
8702 {
8703 Log(("xrstor: Not enabled for nested-guest execution -> #UD\n"));
8704 return iemRaiseUndefinedOpcode(pVCpu);
8705 }
8706 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8707 return iemRaiseDeviceNotAvailable(pVCpu);
8708 if (GCPtrEff & 63)
8709 {
8710 /** @todo CPU/VM detection possible! \#AC might not be signalled for
8711 * all/any misalignment sizes; Intel says it's an implementation detail. */
8712 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
8713 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
8714 && pVCpu->iem.s.uCpl == 3)
8715 return iemRaiseAlignmentCheckException(pVCpu);
8716 return iemRaiseGeneralProtectionFault0(pVCpu);
8717 }
8718
8719/** @todo figure out the exact protocol for the memory access. Currently we
8720 * just need this crap to work halfways to make it possible to test
8721 * AVX instructions. */
8722/** @todo figure out the XINUSE and XMODIFIED */
8723
8724 /*
8725 * Access the x87 memory state.
8726 */
8727 /* The x87+SSE state. */
8728 void *pvMem512;
8729 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8730 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8731 if (rcStrict != VINF_SUCCESS)
8732 return rcStrict;
8733 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8734 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8735
8736 /*
8737 * Calc the requested mask
8738 */
8739 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr;
8740 PCX86XSAVEHDR pHdrSrc;
8741 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512,
8742 IEM_ACCESS_DATA_R, 0 /* checked above */);
8743 if (rcStrict != VINF_SUCCESS)
8744 return rcStrict;
8745
8746 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
8747 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8748 //uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8749 uint64_t const fRstorMask = pHdrSrc->bmXState;
8750 uint64_t const fCompMask = pHdrSrc->bmXComp;
8751
8752 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8753
8754 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8755
8756 /* We won't need this any longer. */
8757 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
8758 if (rcStrict != VINF_SUCCESS)
8759 return rcStrict;
8760
8761 /*
8762 * Store the X87 state.
8763 */
8764 if (fReqComponents & XSAVE_C_X87)
8765 {
8766 if (fRstorMask & XSAVE_C_X87)
8767 {
8768 pDst->FCW = pSrc->FCW;
8769 pDst->FSW = pSrc->FSW;
8770 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8771 pDst->FOP = pSrc->FOP;
8772 pDst->FPUIP = pSrc->FPUIP;
8773 pDst->CS = pSrc->CS;
8774 pDst->FPUDP = pSrc->FPUDP;
8775 pDst->DS = pSrc->DS;
8776 if (enmEffOpSize == IEMMODE_64BIT)
8777 {
8778 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8779 pDst->Rsrvd1 = pSrc->Rsrvd1;
8780 pDst->Rsrvd2 = pSrc->Rsrvd2;
8781 }
8782 else
8783 {
8784 pDst->Rsrvd1 = 0;
8785 pDst->Rsrvd2 = 0;
8786 }
8787 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8788 {
8789 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8790 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8791 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8792 pDst->aRegs[i].au32[3] = 0;
8793 }
8794 if (pDst->FSW & X86_FSW_ES)
8795 Log11(("xrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
8796 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
8797 }
8798 else
8799 {
8800 pDst->FCW = 0x37f;
8801 pDst->FSW = 0;
8802 pDst->FTW = 0x00; /* 0 - empty. */
8803 pDst->FPUDP = 0;
8804 pDst->DS = 0; //??
8805 pDst->Rsrvd2= 0;
8806 pDst->FPUIP = 0;
8807 pDst->CS = 0; //??
8808 pDst->Rsrvd1= 0;
8809 pDst->FOP = 0;
8810 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8811 {
8812 pDst->aRegs[i].au32[0] = 0;
8813 pDst->aRegs[i].au32[1] = 0;
8814 pDst->aRegs[i].au32[2] = 0;
8815 pDst->aRegs[i].au32[3] = 0;
8816 }
8817 }
8818 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
8819 }
8820
8821 /* MXCSR */
8822 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
8823 {
8824 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
8825 pDst->MXCSR = pSrc->MXCSR;
8826 else
8827 pDst->MXCSR = 0x1f80;
8828 }
8829
8830 /* XMM registers. */
8831 if (fReqComponents & XSAVE_C_SSE)
8832 {
8833 if (fRstorMask & XSAVE_C_SSE)
8834 {
8835 for (uint32_t i = 0; i < cXmmRegs; i++)
8836 pDst->aXMM[i] = pSrc->aXMM[i];
8837 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8838 * right? */
8839 }
8840 else
8841 {
8842 for (uint32_t i = 0; i < cXmmRegs; i++)
8843 {
8844 pDst->aXMM[i].au64[0] = 0;
8845 pDst->aXMM[i].au64[1] = 0;
8846 }
8847 }
8848 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
8849 }
8850
8851 /* Unmap the x87 state bits (so we don't run out of mappings). */
8852 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
8853 if (rcStrict != VINF_SUCCESS)
8854 return rcStrict;
8855
8856 /*
8857 * Restore AVX state.
8858 */
8859 if (fReqComponents & XSAVE_C_YMM)
8860 {
8861 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
8862 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
8863
8864 if (fRstorMask & XSAVE_C_YMM)
8865 {
8866 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
8867 PCX86XSAVEYMMHI pCompSrc;
8868 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
8869 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
8870 IEM_ACCESS_DATA_R, 0 /* checked above */);
8871 if (rcStrict != VINF_SUCCESS)
8872 return rcStrict;
8873
8874 for (uint32_t i = 0; i < cXmmRegs; i++)
8875 {
8876 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
8877 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
8878 }
8879
8880 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
8881 if (rcStrict != VINF_SUCCESS)
8882 return rcStrict;
8883 }
8884 else
8885 {
8886 for (uint32_t i = 0; i < cXmmRegs; i++)
8887 {
8888 pCompDst->aYmmHi[i].au64[0] = 0;
8889 pCompDst->aYmmHi[i].au64[1] = 0;
8890 }
8891 }
8892 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
8893 }
8894
8895 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8896 return VINF_SUCCESS;
8897}
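
/*
 * Illustrative sketch (not part of the build): the per-component decision
 * XRSTOR makes above.  A component named in RFBM is either loaded from the
 * save area (its XSTATE_BV bit is set) or put into its init state (bit clear);
 * components outside RFBM are left untouched.
 */
#if 0
# include <stdint.h>
typedef enum XRSTORACTION { XRSTOR_LEAVE, XRSTOR_INIT, XRSTOR_LOAD } XRSTORACTION;
static inline XRSTORACTION xrstorComponentAction(uint64_t fRfbm, uint64_t bmXStateBv, unsigned iComponent)
{
    uint64_t const fBit = (uint64_t)1 << iComponent;
    if (!(fRfbm & fBit))
        return XRSTOR_LEAVE;
    return (bmXStateBv & fBit) ? XRSTOR_LOAD : XRSTOR_INIT;
}
#endif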
8898
8899
8900
8901
8902/**
8903 * Implements 'STMXCSR'.
8904 *
8905 * @param iEffSeg The effective segment register for @a GCPtrEff.
8906 * @param GCPtrEff The address of the image.
8907 */
8908IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8909{
8910 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8911
8912 /*
8913 * Raise exceptions.
8914 */
8915 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
8916 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
8917 {
8918 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
8919 {
8920 /*
8921 * Do the job.
8922 */
8923 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
8924 if (rcStrict == VINF_SUCCESS)
8925 {
8926 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8927 return VINF_SUCCESS;
8928 }
8929 return rcStrict;
8930 }
8931 return iemRaiseDeviceNotAvailable(pVCpu);
8932 }
8933 return iemRaiseUndefinedOpcode(pVCpu);
8934}
8935
8936
8937/**
8938 * Implements 'VSTMXCSR'.
8939 *
8940 * @param iEffSeg The effective segment register for @a GCPtrEff.
8941 * @param GCPtrEff The address of the image.
8942 */
8943IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8944{
8945 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);
8946
8947 /*
8948 * Raise exceptions.
8949 */
8950 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
8951 ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
8952 : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
8953 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8954 {
8955 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
8956 {
8957 /*
8958 * Do the job.
8959 */
8960 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
8961 if (rcStrict == VINF_SUCCESS)
8962 {
8963 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8964 return VINF_SUCCESS;
8965 }
8966 return rcStrict;
8967 }
8968 return iemRaiseDeviceNotAvailable(pVCpu);
8969 }
8970 return iemRaiseUndefinedOpcode(pVCpu);
8971}
8972
8973
8974/**
8975 * Implements 'LDMXCSR'.
8976 *
8977 * @param iEffSeg The effective segment register for @a GCPtrEff.
8978 * @param GCPtrEff The address of the image.
8979 */
8980IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8981{
8982 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8983
8984 /*
8985 * Raise exceptions.
8986 */
8987 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
8988 * happen after or before \#UD and \#EM? */
8989 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
8990 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
8991 {
8992 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
8993 {
8994 /*
8995 * Do the job.
8996 */
8997 uint32_t fNewMxCsr;
8998 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
8999 if (rcStrict == VINF_SUCCESS)
9000 {
9001 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
9002 if (!(fNewMxCsr & ~fMxCsrMask))
9003 {
9004 pVCpu->cpum.GstCtx.XState.x87.MXCSR = fNewMxCsr;
9005 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
9006 return VINF_SUCCESS;
9007 }
9008 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
9009 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
9010 return iemRaiseGeneralProtectionFault0(pVCpu);
9011 }
9012 return rcStrict;
9013 }
9014 return iemRaiseDeviceNotAvailable(pVCpu);
9015 }
9016 return iemRaiseUndefinedOpcode(pVCpu);
9017}
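
/*
 * Illustrative sketch (not part of the build): the reserved-bit rule shared by
 * LDMXCSR and FXRSTOR above.  A new MXCSR value is acceptable only if it sets
 * no bits outside MXCSR_MASK (0xffbf on CPUs without DAZ, typically 0xffff
 * with it); anything else raises #GP(0).
 */
#if 0
# include <stdbool.h>
# include <stdint.h>
static inline bool mxcsrValueIsValid(uint32_t fNewMxCsr, uint32_t fMxCsrMask)
{
    return (fNewMxCsr & ~fMxCsrMask) == 0;
}
#endif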
9018
9019
9020/**
9021 * Common routine for fnstenv and fnsave.
9022 *
9023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9024 * @param enmEffOpSize The effective operand size.
9025 * @param uPtr Where to store the state.
9026 */
9027static void iemCImplCommonFpuStoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr)
9028{
9029 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9030 PCX86FXSTATE pSrcX87 = &pVCpu->cpum.GstCtx.XState.x87;
9031 if (enmEffOpSize == IEMMODE_16BIT)
9032 {
9033 uPtr.pu16[0] = pSrcX87->FCW;
9034 uPtr.pu16[1] = pSrcX87->FSW;
9035 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
9036 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9037 {
9038 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
9039 * protected mode or long mode and we save it in real mode? And vice
9040 * versa? And with 32-bit operand size? I think the CPU is storing the
9041 * effective address ((CS << 4) + IP) in the offset register and not
9042 * doing any address calculations here. */
9043 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
9044 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
9045 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
9046 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
9047 }
9048 else
9049 {
9050 uPtr.pu16[3] = pSrcX87->FPUIP;
9051 uPtr.pu16[4] = pSrcX87->CS;
9052 uPtr.pu16[5] = pSrcX87->FPUDP;
9053 uPtr.pu16[6] = pSrcX87->DS;
9054 }
9055 }
9056 else
9057 {
9058 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
9059 uPtr.pu16[0*2] = pSrcX87->FCW;
9060 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
9061 uPtr.pu16[1*2] = pSrcX87->FSW;
9062 uPtr.pu16[1*2+1] = 0xffff;
9063 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
9064 uPtr.pu16[2*2+1] = 0xffff;
9065 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9066 {
9067 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
9068 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
9069 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
9070 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
9071 }
9072 else
9073 {
9074 uPtr.pu32[3] = pSrcX87->FPUIP;
9075 uPtr.pu16[4*2] = pSrcX87->CS;
9076 uPtr.pu16[4*2+1] = pSrcX87->FOP;
9077 uPtr.pu32[5] = pSrcX87->FPUDP;
9078 uPtr.pu16[6*2] = pSrcX87->DS;
9079 uPtr.pu16[6*2+1] = 0xffff;
9080 }
9081 }
9082}
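
/*
 * Illustrative sketch (not part of the build): the 16-bit real-mode FNSTENV
 * packing used above.  The low 16 bits of the 20-bit linear FPU instruction
 * pointer go into one word; its top four bits land in bits 15:12 of the next
 * word together with the 11-bit opcode (FOP).  The helper name is hypothetical.
 */
#if 0
# include <stdint.h>
static inline void fnstenvPackIp16(uint32_t uFpuIpLinear, uint16_t uFop, uint16_t *puIpWord, uint16_t *puHiWord)
{
    *puIpWord = (uint16_t)uFpuIpLinear;
    *puHiWord = (uint16_t)(((uFpuIpLinear >> 4) & UINT16_C(0xf000)) | (uFop & UINT16_C(0x07ff)));
}
#endif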
9083
9084
9085/**
9086 * Common routine for fldenv and frstor.
9087 *
9088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9089 * @param enmEffOpSize The effective operand size.
9090 * @param uPtr Where to load the state from.
9091 */
9092static void iemCImplCommonFpuRestoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr)
9093{
9094 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9095 PX86FXSTATE pDstX87 = &pVCpu->cpum.GstCtx.XState.x87;
9096 if (enmEffOpSize == IEMMODE_16BIT)
9097 {
9098 pDstX87->FCW = uPtr.pu16[0];
9099 pDstX87->FSW = uPtr.pu16[1];
9100 pDstX87->FTW = uPtr.pu16[2];
9101 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9102 {
9103 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
9104 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
9105 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
9106 pDstX87->CS = 0;
9107 pDstX87->Rsrvd1= 0;
9108 pDstX87->DS = 0;
9109 pDstX87->Rsrvd2= 0;
9110 }
9111 else
9112 {
9113 pDstX87->FPUIP = uPtr.pu16[3];
9114 pDstX87->CS = uPtr.pu16[4];
9115 pDstX87->Rsrvd1= 0;
9116 pDstX87->FPUDP = uPtr.pu16[5];
9117 pDstX87->DS = uPtr.pu16[6];
9118 pDstX87->Rsrvd2= 0;
9119 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
9120 }
9121 }
9122 else
9123 {
9124 pDstX87->FCW = uPtr.pu16[0*2];
9125 pDstX87->FSW = uPtr.pu16[1*2];
9126 pDstX87->FTW = uPtr.pu16[2*2];
9127 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9128 {
9129 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
9130 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
9131 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
9132 pDstX87->CS = 0;
9133 pDstX87->Rsrvd1= 0;
9134 pDstX87->DS = 0;
9135 pDstX87->Rsrvd2= 0;
9136 }
9137 else
9138 {
9139 pDstX87->FPUIP = uPtr.pu32[3];
9140 pDstX87->CS = uPtr.pu16[4*2];
9141 pDstX87->Rsrvd1= 0;
9142 pDstX87->FOP = uPtr.pu16[4*2+1];
9143 pDstX87->FPUDP = uPtr.pu32[5];
9144 pDstX87->DS = uPtr.pu16[6*2];
9145 pDstX87->Rsrvd2= 0;
9146 }
9147 }
9148
9149 /* Make adjustments. */
9150 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
9151#ifdef LOG_ENABLED
9152 uint16_t const fOldFsw = pDstX87->FSW;
9153#endif
9154 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
9155 iemFpuRecalcExceptionStatus(pDstX87);
9156#ifdef LOG_ENABLED
9157 if ((pDstX87->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9158 Log11(("iemCImplCommonFpuRestoreEnv: %04x:%08RX64: %s FPU exception (FCW=%#x FSW=%#x -> %#x)\n",
9159 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised",
9160 pDstX87->FCW, fOldFsw, pDstX87->FSW));
9161#endif
9162
9163 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
9164 * exceptions are pending after loading the saved state? */
9165}
9166
9167
9168/**
9169 * Implements 'FNSTENV'.
9170 *
9171 * @param enmEffOpSize The operand size.
9172 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9173 * @param GCPtrEffDst The address of the image.
9174 */
9175IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9176{
9177 RTPTRUNION uPtr;
9178 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9179 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
9180 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */);
9181 if (rcStrict != VINF_SUCCESS)
9182 return rcStrict;
9183
9184 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9185
9186 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9187 if (rcStrict != VINF_SUCCESS)
9188 return rcStrict;
9189
9190 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9191 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
9192 return VINF_SUCCESS;
9193}
9194
9195
9196/**
9197 * Implements 'FNSAVE'.
9198 *
9199 * @param enmEffOpSize The operand size.
9200 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9201 * @param GCPtrEffDst The address of the image.
9202 */
9203IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9204{
9205 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9206
9207 RTPTRUNION uPtr;
9208 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9209 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */);
9210 if (rcStrict != VINF_SUCCESS)
9211 return rcStrict;
9212
9213 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9214 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9215 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9216 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9217 {
9218 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
9219 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
9220 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
9221 }
9222
9223 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9224 if (rcStrict != VINF_SUCCESS)
9225 return rcStrict;
9226
9227 /*
9228 * Re-initialize the FPU context.
9229 */
9230 pFpuCtx->FCW = 0x37f;
9231 pFpuCtx->FSW = 0;
9232 pFpuCtx->FTW = 0x00; /* 0 - empty */
9233 pFpuCtx->FPUDP = 0;
9234 pFpuCtx->DS = 0;
9235 pFpuCtx->Rsrvd2= 0;
9236 pFpuCtx->FPUIP = 0;
9237 pFpuCtx->CS = 0;
9238 pFpuCtx->Rsrvd1= 0;
9239 pFpuCtx->FOP = 0;
9240
9241 iemHlpUsedFpu(pVCpu);
9242 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
9243 return VINF_SUCCESS;
9244}
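
/*
 * Illustrative sketch (not part of the build): how the 94/108 byte FNSAVE
 * image sizes used above come about.  The environment block is 14 bytes with a
 * 16-bit operand size and 28 bytes otherwise, followed in both cases by the
 * eight 10-byte ST registers.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>
static inline uint32_t fnsaveImageSize(bool f16BitOpSize)
{
    uint32_t const cbEnv = f16BitOpSize ? 14 : 28;
    return cbEnv + 8 * 10;  /* 94 or 108 bytes */
}
#endif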
9245
9246
9247
9248/**
9249 * Implements 'FLDENV'.
9250 *
9251 * @param enmEffOpSize The operand size.
9252 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9253 * @param GCPtrEffSrc The address of the image.
9254 */
9255IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9256{
9257 RTCPTRUNION uPtr;
9258 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9259 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R,
9260 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/);
9261 if (rcStrict != VINF_SUCCESS)
9262 return rcStrict;
9263
9264 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9265
9266 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
9267 if (rcStrict != VINF_SUCCESS)
9268 return rcStrict;
9269
9270 iemHlpUsedFpu(pVCpu);
9271 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
9272 return VINF_SUCCESS;
9273}
9274
9275
9276/**
9277 * Implements 'FRSTOR'.
9278 *
9279 * @param enmEffOpSize The operand size.
9280 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9281 * @param GCPtrEffSrc The address of the image.
9282 */
9283IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9284{
9285 RTCPTRUNION uPtr;
9286 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9287 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ );
9288 if (rcStrict != VINF_SUCCESS)
9289 return rcStrict;
9290
9291 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9292 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9293 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9294 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9295 {
9296 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
9297 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
9298 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
9299 pFpuCtx->aRegs[i].au32[3] = 0;
9300 }
9301
9302 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
9303 if (rcStrict != VINF_SUCCESS)
9304 return rcStrict;
9305
9306 iemHlpUsedFpu(pVCpu);
9307 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
9308 return VINF_SUCCESS;
9309}
9310
9311
9312/**
9313 * Implements 'FLDCW'.
9314 *
9315 * @param u16Fcw The new FCW.
9316 */
9317IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
9318{
9319 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9320
9321 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
9322 /** @todo Testcase: Try see what happens when trying to set undefined bits
9323 * (other than 6 and 7). Currently ignoring them. */
9324 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
9325 * according to FSW. (This is what is currently implemented.) */
9326 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9327 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
9328#ifdef LOG_ENABLED
9329 uint16_t fOldFsw = pFpuCtx->FSW;
9330#endif
9331 iemFpuRecalcExceptionStatus(pFpuCtx);
9332#ifdef LOG_ENABLED
9333 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9334 Log11(("fldcw: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9335 fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9336#endif
9337
9338 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9339 iemHlpUsedFpu(pVCpu);
9340 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
9341 return VINF_SUCCESS;
9342}
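
/*
 * Illustrative sketch (not part of the build): the exception summary rule that
 * the FSW recalculation after FLDCW implements.  FSW.ES (and B) should be set
 * exactly when one of the six exception flags in FSW[5:0] is pending while the
 * corresponding mask bit in FCW[5:0] is clear.  The helper name is hypothetical.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>
static inline bool fpuHasUnmaskedXcpt(uint16_t uFsw, uint16_t uFcw)
{
    return ((uFsw & 0x3f) & ~(uFcw & 0x3f)) != 0;
}
#endif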
9343
9344
9345
9346/**
9347 * Implements the underflow case of fxch.
9348 *
9349 * @param iStReg The other stack register.
9350 */
9351IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
9352{
9353 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9354
9355 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9356 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
9357 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9358 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
9359
9360 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
9361 * registers are read as QNaN and then exchanged. This could be
9362 * wrong... */
9363 if (pFpuCtx->FCW & X86_FCW_IM)
9364 {
9365 if (RT_BIT(iReg1) & pFpuCtx->FTW)
9366 {
9367 if (RT_BIT(iReg2) & pFpuCtx->FTW)
9368 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9369 else
9370 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
9371 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
9372 }
9373 else
9374 {
9375 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
9376 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9377 }
9378 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
9379 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
9380 }
9381 else
9382 {
9383 /* raise underflow exception, don't change anything. */
9384 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
9385 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9386 Log11(("fxch: %04x:%08RX64: Underflow exception (FSW=%#x)\n",
9387 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9388 }
9389
9390 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
9391 iemHlpUsedFpu(pVCpu);
9392 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
9393 return VINF_SUCCESS;
9394}
9395
9396
9397/**
9398 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
9399 *
9400 * @param iStReg The other stack register.
9401 * @param pfnAImpl The assembly comparison implementation.
9402 * @param fPop Whether we should pop the stack when done or not.
9403 */
9404IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
9405{
9406 Assert(iStReg < 8);
9407 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9408
9409 /*
9410 * Raise exceptions.
9411 */
9412 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
9413 return iemRaiseDeviceNotAvailable(pVCpu);
9414
9415 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9416 uint16_t u16Fsw = pFpuCtx->FSW;
9417 if (u16Fsw & X86_FSW_ES)
9418 return iemRaiseMathFault(pVCpu);
9419
9420 /*
9421 * Check if any of the register accesses causes #SF + #IA.
9422 */
9423 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
9424 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9425 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
9426 {
9427 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
9428
9429 pFpuCtx->FSW &= ~X86_FSW_C1;
9430 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
9431 if ( !(u16Fsw & X86_FSW_IE)
9432 || (pFpuCtx->FCW & X86_FCW_IM) )
9433 {
9434 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9435 pVCpu->cpum.GstCtx.eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9436 }
9437 }
9438 else if (pFpuCtx->FCW & X86_FCW_IM)
9439 {
9440 /* Masked underflow. */
9441 pFpuCtx->FSW &= ~X86_FSW_C1;
9442 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
9443 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9444 pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
9445 }
9446 else
9447 {
9448 /* Raise underflow - don't touch EFLAGS or TOP. */
9449 pFpuCtx->FSW &= ~X86_FSW_C1;
9450 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9451 Log11(("fcomi: %04x:%08RX64: Raising IE+SF exception (FSW=%#x)\n",
9452 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9453 fPop = false;
9454 }
9455
9456 /*
9457 * Pop if necessary.
9458 */
9459 if (fPop)
9460 {
9461 pFpuCtx->FTW &= ~RT_BIT(iReg1);
9462 iemFpuStackIncTop(pVCpu);
9463 }
9464
9465 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
9466 iemHlpUsedFpu(pVCpu);
9467 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
9468 return VINF_SUCCESS;
9469}
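
/*
 * Illustrative sketch (not part of the build): the EFLAGS convention produced
 * by the comparison helper used above.  FCOMI/FUCOMI report greater as
 * ZF=PF=CF=0, less as CF=1, equal as ZF=1 and unordered as ZF=PF=CF=1, while
 * OF, SF and AF are always cleared.  Names below are hypothetical.
 */
#if 0
# include <stdint.h>
# define SKETCH_EFL_CF UINT32_C(0x0001)
# define SKETCH_EFL_PF UINT32_C(0x0004)
# define SKETCH_EFL_ZF UINT32_C(0x0040)
typedef enum FCMPRESULT { FCMP_GREATER, FCMP_LESS, FCMP_EQUAL, FCMP_UNORDERED } FCMPRESULT;
static inline uint32_t fcomiResultToEflags(FCMPRESULT enmRes)
{
    switch (enmRes)
    {
        case FCMP_LESS:      return SKETCH_EFL_CF;
        case FCMP_EQUAL:     return SKETCH_EFL_ZF;
        case FCMP_UNORDERED: return SKETCH_EFL_ZF | SKETCH_EFL_PF | SKETCH_EFL_CF;
        default:             return 0;
    }
}
#endif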
9470
9471/** @} */
9472