VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c @ 47309

Last change on this file was r47309, checked in by vboxsync, 12 years ago:

REM: Try set DESC_INTEL_UNUSED where applicable. Fixed values in DR6, mapped DR5 to DR7 and DR4 to DR6.

1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
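/* Added illustration (not part of the original source): parity_table encodes
 * the x86 PF semantics, i.e. CC_P is set when the low byte of a result has an
 * even number of 1 bits.  A disabled self-check sketch (assumes <assert.h>): */
#if 0
static void check_parity_table(void)
{
    int i;
    for (i = 0; i < 256; i++) {
        int v = i, bits = 0;
        while (v) {                 /* count the set bits of the byte */
            bits += v & 1;
            v >>= 1;
        }
        assert(parity_table[i] == ((bits & 1) ? 0 : CC_P));
    }
}
#endif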
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
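/* Added note (illustrative): RCL/RCR rotate through CF, i.e. over width+1
 * bits, so the effective count is the shift count modulo 17 for 16-bit
 * operands and modulo 9 for 8-bit operands; e.g. rclw_table[18] == 1 and
 * rclb_table[9] == 0. */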
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
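/* Added note (illustrative): the arithmetic flags are maintained lazily in
 * CC_SRC/CC_DST/CC_OP, so the helper_cc_compute_all(CC_OP) call above
 * materialises CF/PF/AF/ZF/SF/OF on demand before they are merged with the
 * directly stored bits of env->eflags. */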
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non zero if error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with CPL=1? */
234 /** @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
235 /** @todo in theory the iret could fault and we'd still need this. */
236 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
237 {
238 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
239 selector = selector & 0xfffc;
240 }
241#endif /* VBOX */
242
243 if (selector & 0x4)
244 dt = &env->ldt;
245 else
246 dt = &env->gdt;
247 index = selector & ~7;
248 if ((index + 7) > dt->limit)
249 return -1;
250 ptr = dt->base + index;
251 *e1_ptr = ldl_kernel(ptr);
252 *e2_ptr = ldl_kernel(ptr + 4);
253 return 0;
254}
255
256static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257{
258 unsigned int limit;
259 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
260 if (e2 & DESC_G_MASK)
261 limit = (limit << 12) | 0xfff;
262 return limit;
263}
264
265static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
266{
267 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
268}
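/* Added example (illustrative): e1/e2 are the low and high dwords of an
 * 8-byte descriptor.  A flat 4GiB ring-0 data descriptor 0x00cf93000000ffff
 * splits into e1 = 0x0000ffff and e2 = 0x00cf9300; get_seg_base() then
 * returns 0, and since DESC_G_MASK is set get_seg_limit() expands the 20-bit
 * limit 0xfffff to 0xffffffff. */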
269
270static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
271{
272 sc->base = get_seg_base(e1, e2);
273 sc->limit = get_seg_limit(e1, e2);
274 sc->flags = e2;
275#ifdef VBOX
276 sc->flags &= ~DESC_INTEL_UNUSABLE;
277 sc->newselector = 0;
278 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
279#endif
280}
281
282/* init the segment cache in vm86 mode. */
283static inline void load_seg_vm(int seg, int selector)
284{
285 selector &= 0xffff;
286#ifdef VBOX
287 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
288 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
289 flags |= (3 << DESC_DPL_SHIFT);
290
291 cpu_x86_load_seg_cache(env, seg, selector,
292 (selector << 4), 0xffff, flags);
293#else /* VBOX */
294 cpu_x86_load_seg_cache(env, seg, selector,
295 (selector << 4), 0xffff, 0);
296#endif /* VBOX */
297}
298
299static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
300 uint32_t *esp_ptr, int dpl)
301{
302#ifndef VBOX
303 int type, index, shift;
304#else
305 unsigned int type, index, shift;
306#endif
307
308#if 0
309 {
310 int i;
311 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
312 for(i=0;i<env->tr.limit;i++) {
313 printf("%02x ", env->tr.base[i]);
314 if ((i & 7) == 7) printf("\n");
315 }
316 printf("\n");
317 }
318#endif
319
320 if (!(env->tr.flags & DESC_P_MASK))
321 cpu_abort(env, "invalid tss");
322 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
323 if ((type & 7) != 1)
324 cpu_abort(env, "invalid tss type");
325 shift = type >> 3;
326 index = (dpl * 4 + 2) << shift;
327 if (index + (4 << shift) - 1 > env->tr.limit)
328 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
329 if (shift == 0) {
330 *esp_ptr = lduw_kernel(env->tr.base + index);
331 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
332 } else {
333 *esp_ptr = ldl_kernel(env->tr.base + index);
334 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
335 }
336}
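/* Added note (illustrative): for a 32-bit TSS (shift == 1) the index computed
 * above is dpl * 8 + 4, i.e. ESP0/SS0 live at TSS offsets 0x04/0x08, ESP1/SS1
 * at 0x0c/0x10 and ESP2/SS2 at 0x14/0x18; the 16-bit TSS uses the same layout
 * with word-sized slots starting at offset 2. */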
337
338/* XXX: merge with load_seg() */
339static void tss_load_seg(int seg_reg, int selector)
340{
341 uint32_t e1, e2;
342 int rpl, dpl, cpl;
343
344#ifdef VBOX
345 e1 = e2 = 0; /* gcc warning? */
346 cpl = env->hflags & HF_CPL_MASK;
347 /* Trying to load a selector with CPL=1? */
348 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
349 {
350 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
351 selector = selector & 0xfffc;
352 }
353#endif /* VBOX */
354
355 if ((selector & 0xfffc) != 0) {
356 if (load_segment(&e1, &e2, selector) != 0)
357 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
358 if (!(e2 & DESC_S_MASK))
359 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
360 rpl = selector & 3;
361 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
362 cpl = env->hflags & HF_CPL_MASK;
363 if (seg_reg == R_CS) {
364 if (!(e2 & DESC_CS_MASK))
365 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
366 /* XXX: is it correct ? */
367 if (dpl != rpl)
368 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
369 if ((e2 & DESC_C_MASK) && dpl > rpl)
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 } else if (seg_reg == R_SS) {
372 /* SS must be writable data */
373 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 if (dpl != cpl || dpl != rpl)
376 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
377 } else {
378 /* not readable code */
379 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
380 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
381 /* if data or non-conforming code, check the rights */
382 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
383 if (dpl < cpl || dpl < rpl)
384 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
385 }
386 }
387 if (!(e2 & DESC_P_MASK))
388 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
389 cpu_x86_load_seg_cache(env, seg_reg, selector,
390 get_seg_base(e1, e2),
391 get_seg_limit(e1, e2),
392 e2);
393 } else {
394 if (seg_reg == R_SS || seg_reg == R_CS)
395 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
396#ifdef VBOX
397# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
398 cpu_x86_load_seg_cache(env, seg_reg, selector,
399 0, 0, 0);
400# endif
401#endif /* VBOX */
402 }
403}
404
405#define SWITCH_TSS_JMP 0
406#define SWITCH_TSS_IRET 1
407#define SWITCH_TSS_CALL 2
408
409/* XXX: restore CPU state in registers (PowerPC case) */
410static void switch_tss(int tss_selector,
411 uint32_t e1, uint32_t e2, int source,
412 uint32_t next_eip)
413{
414 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
415 target_ulong tss_base;
416 uint32_t new_regs[8], new_segs[6];
417 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
418 uint32_t old_eflags, eflags_mask;
419 SegmentCache *dt;
420#ifndef VBOX
421 int index;
422#else
423 unsigned int index;
424#endif
425 target_ulong ptr;
426
427 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
428 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
429
430 /* if task gate, we read the TSS segment and we load it */
431 if (type == 5) {
432 if (!(e2 & DESC_P_MASK))
433 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
434 tss_selector = e1 >> 16;
435 if (tss_selector & 4)
436 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
437 if (load_segment(&e1, &e2, tss_selector) != 0)
438 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
439 if (e2 & DESC_S_MASK)
440 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
441 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
442 if ((type & 7) != 1)
443 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
444 }
445
446 if (!(e2 & DESC_P_MASK))
447 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
448
449 if (type & 8)
450 tss_limit_max = 103;
451 else
452 tss_limit_max = 43;
453 tss_limit = get_seg_limit(e1, e2);
454 tss_base = get_seg_base(e1, e2);
455 if ((tss_selector & 4) != 0 ||
456 tss_limit < tss_limit_max)
457 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
458 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
459 if (old_type & 8)
460 old_tss_limit_max = 103;
461 else
462 old_tss_limit_max = 43;
463
464 /* read all the registers from the new TSS */
465 if (type & 8) {
466 /* 32 bit */
467 new_cr3 = ldl_kernel(tss_base + 0x1c);
468 new_eip = ldl_kernel(tss_base + 0x20);
469 new_eflags = ldl_kernel(tss_base + 0x24);
470 for(i = 0; i < 8; i++)
471 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
472 for(i = 0; i < 6; i++)
473 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
474 new_ldt = lduw_kernel(tss_base + 0x60);
475 new_trap = ldl_kernel(tss_base + 0x64);
476 } else {
477 /* 16 bit */
478 new_cr3 = 0;
479 new_eip = lduw_kernel(tss_base + 0x0e);
480 new_eflags = lduw_kernel(tss_base + 0x10);
481 for(i = 0; i < 8; i++)
482 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
483 for(i = 0; i < 4; i++)
484 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
485 new_ldt = lduw_kernel(tss_base + 0x2a);
486 new_segs[R_FS] = 0;
487 new_segs[R_GS] = 0;
488 new_trap = 0;
489 }
490
491 /* NOTE: we must avoid memory exceptions during the task switch,
492 so we make dummy accesses before */
493 /* XXX: it can still fail in some cases, so a bigger hack is
494 necessary to validate the TLB after having done the accesses */
495
496 v1 = ldub_kernel(env->tr.base);
497 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
498 stb_kernel(env->tr.base, v1);
499 stb_kernel(env->tr.base + old_tss_limit_max, v2);
500
501 /* clear busy bit (it is restartable) */
502 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
503 target_ulong ptr;
504 uint32_t e2;
505 ptr = env->gdt.base + (env->tr.selector & ~7);
506 e2 = ldl_kernel(ptr + 4);
507 e2 &= ~DESC_TSS_BUSY_MASK;
508 stl_kernel(ptr + 4, e2);
509 }
510 old_eflags = compute_eflags();
511 if (source == SWITCH_TSS_IRET)
512 old_eflags &= ~NT_MASK;
513
514 /* save the current state in the old TSS */
515 if (type & 8) {
516 /* 32 bit */
517 stl_kernel(env->tr.base + 0x20, next_eip);
518 stl_kernel(env->tr.base + 0x24, old_eflags);
519 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
520 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
521 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
522 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
523 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
524 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
525 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
526 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
527 for(i = 0; i < 6; i++)
528 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
529#ifdef VBOX
530 /* Must store the ldt as it gets reloaded and might have been changed. */
531 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
532#endif
533#if defined(VBOX) && defined(DEBUG)
534 printf("TSS 32 bits switch\n");
535 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
536#endif
537 } else {
538 /* 16 bit */
539 stw_kernel(env->tr.base + 0x0e, next_eip);
540 stw_kernel(env->tr.base + 0x10, old_eflags);
541 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
542 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
543 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
544 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
545 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
546 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
547 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
548 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
549 for(i = 0; i < 4; i++)
550 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
551#ifdef VBOX
552 /* Must store the ldt as it gets reloaded and might have been changed. */
553 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
554#endif
555 }
556
557 /* now if an exception occurs, it will occur in the next task
558 context */
559
560 if (source == SWITCH_TSS_CALL) {
561 stw_kernel(tss_base, env->tr.selector);
562 new_eflags |= NT_MASK;
563 }
564
565 /* set busy bit */
566 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
567 target_ulong ptr;
568 uint32_t e2;
569 ptr = env->gdt.base + (tss_selector & ~7);
570 e2 = ldl_kernel(ptr + 4);
571 e2 |= DESC_TSS_BUSY_MASK;
572 stl_kernel(ptr + 4, e2);
573 }
574
575 /* set the new CPU state */
576 /* from this point, any exception which occurs can give problems */
577 env->cr[0] |= CR0_TS_MASK;
578 env->hflags |= HF_TS_MASK;
579 env->tr.selector = tss_selector;
580 env->tr.base = tss_base;
581 env->tr.limit = tss_limit;
582 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
583#ifdef VBOX
584 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
585 env->tr.newselector = 0;
586#endif
587
588 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
589 cpu_x86_update_cr3(env, new_cr3);
590 }
591
592 /* load all registers without an exception, then reload them with
593 possible exception */
594 env->eip = new_eip;
595 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
596 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
597 if (!(type & 8))
598 eflags_mask &= 0xffff;
599 load_eflags(new_eflags, eflags_mask);
600 /* XXX: what to do in 16 bit case ? */
601 EAX = new_regs[0];
602 ECX = new_regs[1];
603 EDX = new_regs[2];
604 EBX = new_regs[3];
605 ESP = new_regs[4];
606 EBP = new_regs[5];
607 ESI = new_regs[6];
608 EDI = new_regs[7];
609 if (new_eflags & VM_MASK) {
610 for(i = 0; i < 6; i++)
611 load_seg_vm(i, new_segs[i]);
612 /* in vm86, CPL is always 3 */
613 cpu_x86_set_cpl(env, 3);
614 } else {
615 /* CPL is set to the RPL of CS */
616 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
617 /* first just selectors as the rest may trigger exceptions */
618 for(i = 0; i < 6; i++)
619 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
620 }
621
622 env->ldt.selector = new_ldt & ~4;
623 env->ldt.base = 0;
624 env->ldt.limit = 0;
625 env->ldt.flags = 0;
626#ifdef VBOX
627 env->ldt.flags = DESC_INTEL_UNUSABLE;
628 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
629 env->ldt.newselector = 0;
630#endif
631
632 /* load the LDT */
633 if (new_ldt & 4)
634 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
635
636 if ((new_ldt & 0xfffc) != 0) {
637 dt = &env->gdt;
638 index = new_ldt & ~7;
639 if ((index + 7) > dt->limit)
640 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
641 ptr = dt->base + index;
642 e1 = ldl_kernel(ptr);
643 e2 = ldl_kernel(ptr + 4);
644 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
645 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
646 if (!(e2 & DESC_P_MASK))
647 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
648 load_seg_cache_raw_dt(&env->ldt, e1, e2);
649 }
650
651 /* load the segments */
652 if (!(new_eflags & VM_MASK)) {
653 tss_load_seg(R_CS, new_segs[R_CS]);
654 tss_load_seg(R_SS, new_segs[R_SS]);
655 tss_load_seg(R_ES, new_segs[R_ES]);
656 tss_load_seg(R_DS, new_segs[R_DS]);
657 tss_load_seg(R_FS, new_segs[R_FS]);
658 tss_load_seg(R_GS, new_segs[R_GS]);
659 }
660
661 /* check that EIP is in the CS segment limits */
662 if (new_eip > env->segs[R_CS].limit) {
663 /* XXX: different exception if CALL ? */
664 raise_exception_err(EXCP0D_GPF, 0);
665 }
666
667#ifndef CONFIG_USER_ONLY
668 /* reset local breakpoints */
669 if (env->dr[7] & 0x55) {
670 for (i = 0; i < 4; i++) {
671 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
672 hw_breakpoint_remove(env, i);
673 }
674 env->dr[7] &= ~0x55;
675 }
676#endif
677}
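/* Added reference (illustrative): the fixed offsets used in switch_tss()
 * follow the 32-bit TSS layout: CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24,
 * EAX..EDI at 0x28..0x44, the six segment selectors at 0x48..0x5c, the LDT
 * selector at 0x60, the T bit at 0x64 and the I/O map base at 0x66.  The
 * 16-bit TSS packs the same data into word fields starting at offset 0x0e
 * (IP). */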
678
679/* check if Port I/O is allowed in TSS */
680static inline void check_io(int addr, int size)
681{
682#ifndef VBOX
683 int io_offset, val, mask;
684#else
685 int val, mask;
686 unsigned int io_offset;
687#endif /* VBOX */
688
689 /* TSS must be a valid 32 bit one */
690 if (!(env->tr.flags & DESC_P_MASK) ||
691 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
692 env->tr.limit < 103)
693 goto fail;
694 io_offset = lduw_kernel(env->tr.base + 0x66);
695 io_offset += (addr >> 3);
696 /* Note: the check needs two bytes */
697 if ((io_offset + 1) > env->tr.limit)
698 goto fail;
699 val = lduw_kernel(env->tr.base + io_offset);
700 val >>= (addr & 7);
701 mask = (1 << size) - 1;
702 /* all bits must be zero to allow the I/O */
703 if ((val & mask) != 0) {
704 fail:
705 raise_exception_err(EXCP0D_GPF, 0);
706 }
707}
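/* Added example (illustrative): for a 1-byte access to port 0x3f8 the code
 * above reads the I/O map base from TSS offset 0x66, adds 0x3f8 >> 3 == 0x7f
 * to it, and requires bit (0x3f8 & 7) == 0 of the word fetched there to be
 * clear; any set bit within the size-wide mask raises #GP(0). */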
708
709#ifdef VBOX
710
711/* Keep in sync with gen_check_external_event() */
712void helper_check_external_event()
713{
714 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
715 | CPU_INTERRUPT_EXTERNAL_EXIT
716 | CPU_INTERRUPT_EXTERNAL_TIMER
717 | CPU_INTERRUPT_EXTERNAL_DMA))
718 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
719 && (env->eflags & IF_MASK)
720 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
721 {
722 helper_external_event();
723 }
724
725}
726
727void helper_sync_seg(uint32_t reg)
728{
729 if (env->segs[reg].newselector)
730 sync_seg(env, reg, env->segs[reg].newselector);
731}
732
733#endif /* VBOX */
734
735void helper_check_iob(uint32_t t0)
736{
737 check_io(t0, 1);
738}
739
740void helper_check_iow(uint32_t t0)
741{
742 check_io(t0, 2);
743}
744
745void helper_check_iol(uint32_t t0)
746{
747 check_io(t0, 4);
748}
749
750void helper_outb(uint32_t port, uint32_t data)
751{
752#ifndef VBOX
753 cpu_outb(port, data & 0xff);
754#else
755 cpu_outb(env, port, data & 0xff);
756#endif
757}
758
759target_ulong helper_inb(uint32_t port)
760{
761#ifndef VBOX
762 return cpu_inb(port);
763#else
764 return cpu_inb(env, port);
765#endif
766}
767
768void helper_outw(uint32_t port, uint32_t data)
769{
770#ifndef VBOX
771 cpu_outw(port, data & 0xffff);
772#else
773 cpu_outw(env, port, data & 0xffff);
774#endif
775}
776
777target_ulong helper_inw(uint32_t port)
778{
779#ifndef VBOX
780 return cpu_inw(port);
781#else
782 return cpu_inw(env, port);
783#endif
784}
785
786void helper_outl(uint32_t port, uint32_t data)
787{
788#ifndef VBOX
789 cpu_outl(port, data);
790#else
791 cpu_outl(env, port, data);
792#endif
793}
794
795target_ulong helper_inl(uint32_t port)
796{
797#ifndef VBOX
798 return cpu_inl(port);
799#else
800 return cpu_inl(env, port);
801#endif
802}
803
804static inline unsigned int get_sp_mask(unsigned int e2)
805{
806 if (e2 & DESC_B_MASK)
807 return 0xffffffff;
808 else
809 return 0xffff;
810}
811
812static int exeption_has_error_code(int intno)
813{
814 switch(intno) {
815 case 8:
816 case 10:
817 case 11:
818 case 12:
819 case 13:
820 case 14:
821 case 17:
822 return 1;
823 }
824 return 0;
825}
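/* Added note (illustrative): the vectors above are the exceptions that push
 * an error code, i.e. #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14)
 * and #AC(17). */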
826
827#ifdef TARGET_X86_64
828#define SET_ESP(val, sp_mask)\
829do {\
830 if ((sp_mask) == 0xffff)\
831 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
832 else if ((sp_mask) == 0xffffffffLL)\
833 ESP = (uint32_t)(val);\
834 else\
835 ESP = (val);\
836} while (0)
837#else
838#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
839#endif
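/* Added note (illustrative): the 64-bit variant of SET_ESP above has to
 * special-case a 32-bit sp_mask because writing a 32-bit stack pointer must
 * zero-extend into RSP rather than merge with the preserved upper half. */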
840
841/* in 64-bit machines, this can overflow. So this segment addition macro
842 * can be used to trim the value to 32-bit whenever needed */
843#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
844
845/* XXX: add a is_user flag to have proper security support */
846#define PUSHW(ssp, sp, sp_mask, val)\
847{\
848 sp -= 2;\
849 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
850}
851
852#define PUSHL(ssp, sp, sp_mask, val)\
853{\
854 sp -= 4;\
855 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
856}
857
858#define POPW(ssp, sp, sp_mask, val)\
859{\
860 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
861 sp += 2;\
862}
863
864#define POPL(ssp, sp, sp_mask, val)\
865{\
866 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
867 sp += 4;\
868}
869
870/* protected mode interrupt */
871static void do_interrupt_protected(int intno, int is_int, int error_code,
872 unsigned int next_eip, int is_hw)
873{
874 SegmentCache *dt;
875 target_ulong ptr, ssp;
876 int type, dpl, selector, ss_dpl, cpl;
877 int has_error_code, new_stack, shift;
878 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
879 uint32_t old_eip, sp_mask;
880
881#ifdef VBOX
882 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
883 cpu_loop_exit();
884#endif
885
886 has_error_code = 0;
887 if (!is_int && !is_hw)
888 has_error_code = exeption_has_error_code(intno);
889 if (is_int)
890 old_eip = next_eip;
891 else
892 old_eip = env->eip;
893
894 dt = &env->idt;
895#ifndef VBOX
896 if (intno * 8 + 7 > dt->limit)
897#else
898 if ((unsigned)intno * 8 + 7 > dt->limit)
899#endif
900 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
901 ptr = dt->base + intno * 8;
902 e1 = ldl_kernel(ptr);
903 e2 = ldl_kernel(ptr + 4);
904 /* check gate type */
905 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
906 switch(type) {
907 case 5: /* task gate */
908#ifdef VBOX
909 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
910 cpl = env->hflags & HF_CPL_MASK;
911 /* check privilege if software int */
912 if (is_int && dpl < cpl)
913 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
914#endif
915 /* must do that check here to return the correct error code */
916 if (!(e2 & DESC_P_MASK))
917 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
918 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
919 if (has_error_code) {
920 int type;
921 uint32_t mask;
922 /* push the error code */
923 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
924 shift = type >> 3;
925 if (env->segs[R_SS].flags & DESC_B_MASK)
926 mask = 0xffffffff;
927 else
928 mask = 0xffff;
929 esp = (ESP - (2 << shift)) & mask;
930 ssp = env->segs[R_SS].base + esp;
931 if (shift)
932 stl_kernel(ssp, error_code);
933 else
934 stw_kernel(ssp, error_code);
935 SET_ESP(esp, mask);
936 }
937 return;
938 case 6: /* 286 interrupt gate */
939 case 7: /* 286 trap gate */
940 case 14: /* 386 interrupt gate */
941 case 15: /* 386 trap gate */
942 break;
943 default:
944 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
945 break;
946 }
947 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
948 cpl = env->hflags & HF_CPL_MASK;
949 /* check privilege if software int */
950 if (is_int && dpl < cpl)
951 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
952 /* check valid bit */
953 if (!(e2 & DESC_P_MASK))
954 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
955 selector = e1 >> 16;
956 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
957 if ((selector & 0xfffc) == 0)
958 raise_exception_err(EXCP0D_GPF, 0);
959
960 if (load_segment(&e1, &e2, selector) != 0)
961 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
962 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
963 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
964 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
965 if (dpl > cpl)
966 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
967 if (!(e2 & DESC_P_MASK))
968 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
969 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
970 /* to inner privilege */
971 get_ss_esp_from_tss(&ss, &esp, dpl);
972 if ((ss & 0xfffc) == 0)
973 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
974 if ((ss & 3) != dpl)
975 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
976 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
977 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
978 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
979 if (ss_dpl != dpl)
980 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
981 if (!(ss_e2 & DESC_S_MASK) ||
982 (ss_e2 & DESC_CS_MASK) ||
983 !(ss_e2 & DESC_W_MASK))
984 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
985 if (!(ss_e2 & DESC_P_MASK))
986#ifdef VBOX /* See page 3-477 of 253666.pdf */
987 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
988#else
989 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
990#endif
991 new_stack = 1;
992 sp_mask = get_sp_mask(ss_e2);
993 ssp = get_seg_base(ss_e1, ss_e2);
994#if defined(VBOX) && defined(DEBUG)
995 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
996#endif
997 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
998 /* to same privilege */
999 if (env->eflags & VM_MASK)
1000 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1001 new_stack = 0;
1002 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1003 ssp = env->segs[R_SS].base;
1004 esp = ESP;
1005 dpl = cpl;
1006 } else {
1007 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1008 new_stack = 0; /* avoid warning */
1009 sp_mask = 0; /* avoid warning */
1010 ssp = 0; /* avoid warning */
1011 esp = 0; /* avoid warning */
1012 }
1013
1014 shift = type >> 3;
1015
1016#if 0
1017 /* XXX: check that enough room is available */
1018 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1019 if (env->eflags & VM_MASK)
1020 push_size += 8;
1021 push_size <<= shift;
1022#endif
1023 if (shift == 1) {
1024 if (new_stack) {
1025 if (env->eflags & VM_MASK) {
1026 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1027 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1028 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1029 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1030 }
1031 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1032 PUSHL(ssp, esp, sp_mask, ESP);
1033 }
1034 PUSHL(ssp, esp, sp_mask, compute_eflags());
1035 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1036 PUSHL(ssp, esp, sp_mask, old_eip);
1037 if (has_error_code) {
1038 PUSHL(ssp, esp, sp_mask, error_code);
1039 }
1040 } else {
1041 if (new_stack) {
1042 if (env->eflags & VM_MASK) {
1043 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1044 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1045 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1046 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1047 }
1048 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1049 PUSHW(ssp, esp, sp_mask, ESP);
1050 }
1051 PUSHW(ssp, esp, sp_mask, compute_eflags());
1052 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1053 PUSHW(ssp, esp, sp_mask, old_eip);
1054 if (has_error_code) {
1055 PUSHW(ssp, esp, sp_mask, error_code);
1056 }
1057 }
1058
1059 if (new_stack) {
1060 if (env->eflags & VM_MASK) {
1061 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1062 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1063 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1064 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1065 }
1066 ss = (ss & ~3) | dpl;
1067 cpu_x86_load_seg_cache(env, R_SS, ss,
1068 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1069 }
1070 SET_ESP(esp, sp_mask);
1071
1072 selector = (selector & ~3) | dpl;
1073 cpu_x86_load_seg_cache(env, R_CS, selector,
1074 get_seg_base(e1, e2),
1075 get_seg_limit(e1, e2),
1076 e2);
1077 cpu_x86_set_cpl(env, dpl);
1078 env->eip = offset;
1079
1080 /* interrupt gate clear IF mask */
1081 if ((type & 1) == 0) {
1082 env->eflags &= ~IF_MASK;
1083 }
1084#ifndef VBOX
1085 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1086#else
1087 /*
1088 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1089 * gets confused by seemingly changed EFLAGS. See #3491 and
1090 * public bug #2341.
1091 */
1092 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1093#endif
1094}
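/* Added summary (illustrative): for a 32-bit gate to inner privilege the code
 * above switches to the SS:ESP fetched from the TSS via get_ss_esp_from_tss(),
 * pushes GS/FS/DS/ES (only when coming from VM86), the old SS:ESP, then
 * EFLAGS, CS, EIP and, if applicable, the error code, and finally loads CS
 * from the gate's target selector and EIP from the gate offset. */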
1095
1096#ifdef VBOX
1097
1098/* check if VME interrupt redirection is enabled in TSS */
1099DECLINLINE(bool) is_vme_irq_redirected(int intno)
1100{
1101 unsigned int io_offset, intredir_offset;
1102 unsigned char val, mask;
1103
1104 /* TSS must be a valid 32 bit one */
1105 if (!(env->tr.flags & DESC_P_MASK) ||
1106 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1107 env->tr.limit < 103)
1108 goto fail;
1109 io_offset = lduw_kernel(env->tr.base + 0x66);
1110 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1111 if (io_offset < 0x68 + 0x20)
1112 io_offset = 0x68 + 0x20;
1113 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1114 intredir_offset = io_offset - 0x20;
1115
1116 intredir_offset += (intno >> 3);
1117 if ((intredir_offset) > env->tr.limit)
1118 goto fail;
1119
1120 val = ldub_kernel(env->tr.base + intredir_offset);
1121 mask = 1 << (unsigned char)(intno & 7);
1122
1123 /* bit set means no redirection. */
1124 if ((val & mask) != 0) {
1125 return false;
1126 }
1127 return true;
1128
1129fail:
1130 raise_exception_err(EXCP0D_GPF, 0);
1131 return true;
1132}
1133
1134/* V86 mode software interrupt with CR4.VME=1 */
1135static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1136{
1137 target_ulong ptr, ssp;
1138 int selector;
1139 uint32_t offset, esp;
1140 uint32_t old_cs, old_eflags;
1141 uint32_t iopl;
1142
1143 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1144
1145 if (!is_vme_irq_redirected(intno))
1146 {
1147 if (iopl == 3)
1148 {
1149 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1150 return;
1151 }
1152 else
1153 raise_exception_err(EXCP0D_GPF, 0);
1154 }
1155
1156 /* virtual mode idt is at linear address 0 */
1157 ptr = 0 + intno * 4;
1158 offset = lduw_kernel(ptr);
1159 selector = lduw_kernel(ptr + 2);
1160 esp = ESP;
1161 ssp = env->segs[R_SS].base;
1162 old_cs = env->segs[R_CS].selector;
1163
1164 old_eflags = compute_eflags();
1165 if (iopl < 3)
1166 {
1167 /* copy VIF into IF and set IOPL to 3 */
1168 if (env->eflags & VIF_MASK)
1169 old_eflags |= IF_MASK;
1170 else
1171 old_eflags &= ~IF_MASK;
1172
1173 old_eflags |= (3 << IOPL_SHIFT);
1174 }
1175
1176 /* XXX: use SS segment size ? */
1177 PUSHW(ssp, esp, 0xffff, old_eflags);
1178 PUSHW(ssp, esp, 0xffff, old_cs);
1179 PUSHW(ssp, esp, 0xffff, next_eip);
1180
1181 /* update processor state */
1182 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1183 env->eip = offset;
1184 env->segs[R_CS].selector = selector;
1185 env->segs[R_CS].base = (selector << 4);
1186 env->eflags &= ~(TF_MASK | RF_MASK);
1187
1188 if (iopl < 3)
1189 env->eflags &= ~VIF_MASK;
1190 else
1191 env->eflags &= ~IF_MASK;
1192}
1193
1194#endif /* VBOX */
1195
1196#ifdef TARGET_X86_64
1197
1198#define PUSHQ(sp, val)\
1199{\
1200 sp -= 8;\
1201 stq_kernel(sp, (val));\
1202}
1203
1204#define POPQ(sp, val)\
1205{\
1206 val = ldq_kernel(sp);\
1207 sp += 8;\
1208}
1209
1210static inline target_ulong get_rsp_from_tss(int level)
1211{
1212 int index;
1213
1214#if 0
1215 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1216 env->tr.base, env->tr.limit);
1217#endif
1218
1219 if (!(env->tr.flags & DESC_P_MASK))
1220 cpu_abort(env, "invalid tss");
1221 index = 8 * level + 4;
1222 if ((index + 7) > env->tr.limit)
1223 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1224 return ldq_kernel(env->tr.base + index);
1225}
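/* Added note (illustrative): index = 8 * level + 4 matches the 64-bit TSS
 * layout, where RSP0..RSP2 sit at offsets 0x04/0x0c/0x14 and IST1..IST7 at
 * 0x24..0x54; callers pass ist + 3 so that IST1 maps to level 4, i.e.
 * offset 0x24. */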
1226
1227/* 64 bit interrupt */
1228static void do_interrupt64(int intno, int is_int, int error_code,
1229 target_ulong next_eip, int is_hw)
1230{
1231 SegmentCache *dt;
1232 target_ulong ptr;
1233 int type, dpl, selector, cpl, ist;
1234 int has_error_code, new_stack;
1235 uint32_t e1, e2, e3, ss;
1236 target_ulong old_eip, esp, offset;
1237
1238#ifdef VBOX
1239 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1240 cpu_loop_exit();
1241#endif
1242
1243 has_error_code = 0;
1244 if (!is_int && !is_hw)
1245 has_error_code = exeption_has_error_code(intno);
1246 if (is_int)
1247 old_eip = next_eip;
1248 else
1249 old_eip = env->eip;
1250
1251 dt = &env->idt;
1252 if (intno * 16 + 15 > dt->limit)
1253 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1254 ptr = dt->base + intno * 16;
1255 e1 = ldl_kernel(ptr);
1256 e2 = ldl_kernel(ptr + 4);
1257 e3 = ldl_kernel(ptr + 8);
1258 /* check gate type */
1259 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1260 switch(type) {
1261 case 14: /* 386 interrupt gate */
1262 case 15: /* 386 trap gate */
1263 break;
1264 default:
1265 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1266 break;
1267 }
1268 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1269 cpl = env->hflags & HF_CPL_MASK;
1270 /* check privilege if software int */
1271 if (is_int && dpl < cpl)
1272 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1273 /* check valid bit */
1274 if (!(e2 & DESC_P_MASK))
1275 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1276 selector = e1 >> 16;
1277 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1278 ist = e2 & 7;
1279 if ((selector & 0xfffc) == 0)
1280 raise_exception_err(EXCP0D_GPF, 0);
1281
1282 if (load_segment(&e1, &e2, selector) != 0)
1283 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1284 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1285 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1286 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1287 if (dpl > cpl)
1288 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1289 if (!(e2 & DESC_P_MASK))
1290 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1291 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1292 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1293 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1294 /* to inner privilege */
1295 if (ist != 0)
1296 esp = get_rsp_from_tss(ist + 3);
1297 else
1298 esp = get_rsp_from_tss(dpl);
1299 esp &= ~0xfLL; /* align stack */
1300 ss = 0;
1301 new_stack = 1;
1302 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1303 /* to same privilege */
1304 if (env->eflags & VM_MASK)
1305 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1306 new_stack = 0;
1307 if (ist != 0)
1308 esp = get_rsp_from_tss(ist + 3);
1309 else
1310 esp = ESP;
1311 esp &= ~0xfLL; /* align stack */
1312 dpl = cpl;
1313 } else {
1314 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1315 new_stack = 0; /* avoid warning */
1316 esp = 0; /* avoid warning */
1317 }
1318
1319 PUSHQ(esp, env->segs[R_SS].selector);
1320 PUSHQ(esp, ESP);
1321 PUSHQ(esp, compute_eflags());
1322 PUSHQ(esp, env->segs[R_CS].selector);
1323 PUSHQ(esp, old_eip);
1324 if (has_error_code) {
1325 PUSHQ(esp, error_code);
1326 }
1327
1328 if (new_stack) {
1329 ss = 0 | dpl;
1330#ifndef VBOX
1331 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1332#else
1333 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1334#endif
1335 }
1336 ESP = esp;
1337
1338 selector = (selector & ~3) | dpl;
1339 cpu_x86_load_seg_cache(env, R_CS, selector,
1340 get_seg_base(e1, e2),
1341 get_seg_limit(e1, e2),
1342 e2);
1343 cpu_x86_set_cpl(env, dpl);
1344 env->eip = offset;
1345
1346 /* interrupt gate clear IF mask */
1347 if ((type & 1) == 0) {
1348 env->eflags &= ~IF_MASK;
1349 }
1350#ifndef VBOX
1351 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1352#else /* VBOX */
1353 /*
1354 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1355 * gets confused by seemingly changed EFLAGS. See #3491 and
1356 * public bug #2341.
1357 */
1358 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1359#endif /* VBOX */
1360}
1361#endif
1362
1363#ifdef TARGET_X86_64
1364#if defined(CONFIG_USER_ONLY)
1365void helper_syscall(int next_eip_addend)
1366{
1367 env->exception_index = EXCP_SYSCALL;
1368 env->exception_next_eip = env->eip + next_eip_addend;
1369 cpu_loop_exit();
1370}
1371#else
1372void helper_syscall(int next_eip_addend)
1373{
1374 int selector;
1375
1376 if (!(env->efer & MSR_EFER_SCE)) {
1377 raise_exception_err(EXCP06_ILLOP, 0);
1378 }
1379 selector = (env->star >> 32) & 0xffff;
1380 if (env->hflags & HF_LMA_MASK) {
1381 int code64;
1382
1383 ECX = env->eip + next_eip_addend;
1384 env->regs[11] = compute_eflags();
1385
1386 code64 = env->hflags & HF_CS64_MASK;
1387
1388 cpu_x86_set_cpl(env, 0);
1389 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1390 0, 0xffffffff,
1391 DESC_G_MASK | DESC_P_MASK |
1392 DESC_S_MASK |
1393 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1394 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1395 0, 0xffffffff,
1396 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1397 DESC_S_MASK |
1398 DESC_W_MASK | DESC_A_MASK);
1399 env->eflags &= ~env->fmask;
1400 load_eflags(env->eflags, 0);
1401 if (code64)
1402 env->eip = env->lstar;
1403 else
1404 env->eip = env->cstar;
1405 } else {
1406 ECX = (uint32_t)(env->eip + next_eip_addend);
1407
1408 cpu_x86_set_cpl(env, 0);
1409 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1410 0, 0xffffffff,
1411 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1412 DESC_S_MASK |
1413 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1414 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1415 0, 0xffffffff,
1416 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1417 DESC_S_MASK |
1418 DESC_W_MASK | DESC_A_MASK);
1419 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1420 env->eip = (uint32_t)env->star;
1421 }
1422}
1423#endif
1424#endif
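/* Added summary (illustrative): in long mode helper_syscall() above follows
 * the AMD SYSCALL convention: CS comes from STAR[47:32] (SS is that selector
 * + 8), the return RIP is saved in RCX and RFLAGS in R11, the bits set in the
 * SFMASK MSR (env->fmask) are cleared from EFLAGS, and RIP is loaded from
 * LSTAR (or CSTAR for a 32-bit caller). */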
1425
1426#ifdef TARGET_X86_64
1427void helper_sysret(int dflag)
1428{
1429 int cpl, selector;
1430
1431 if (!(env->efer & MSR_EFER_SCE)) {
1432 raise_exception_err(EXCP06_ILLOP, 0);
1433 }
1434 cpl = env->hflags & HF_CPL_MASK;
1435 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1436 raise_exception_err(EXCP0D_GPF, 0);
1437 }
1438 selector = (env->star >> 48) & 0xffff;
1439 if (env->hflags & HF_LMA_MASK) {
1440 if (dflag == 2) {
1441 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1442 0, 0xffffffff,
1443 DESC_G_MASK | DESC_P_MASK |
1444 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1445 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1446 DESC_L_MASK);
1447 env->eip = ECX;
1448 } else {
1449 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1450 0, 0xffffffff,
1451 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1452 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1453 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1454 env->eip = (uint32_t)ECX;
1455 }
1456 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1457 0, 0xffffffff,
1458 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1459 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1460 DESC_W_MASK | DESC_A_MASK);
1461 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1462 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1463 cpu_x86_set_cpl(env, 3);
1464 } else {
1465 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1466 0, 0xffffffff,
1467 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1468 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1469 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1470 env->eip = (uint32_t)ECX;
1471 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1472 0, 0xffffffff,
1473 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1474 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1475 DESC_W_MASK | DESC_A_MASK);
1476 env->eflags |= IF_MASK;
1477 cpu_x86_set_cpl(env, 3);
1478 }
1479}
1480#endif
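/* Added summary (illustrative): helper_sysret() mirrors this: CS is taken
 * from STAR[63:48] with RPL forced to 3 (plus 16 when returning to 64-bit
 * code so the 64-bit code descriptor is selected), SS is loaded from
 * STAR[63:48] + 8, and in long mode EFLAGS is restored from R11. */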
1481
1482#ifdef VBOX
1483
1484/**
1485 * Checks and processes external VMM events.
1486 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1487 */
1488void helper_external_event(void)
1489{
1490# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1491 uintptr_t uSP;
1492# ifdef RT_ARCH_AMD64
1493 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1494# else
1495 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1496# endif
1497 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1498# endif
1499 /* Keep in sync with flags checked by gen_check_external_event() */
1500 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1501 {
1502 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1503 ~CPU_INTERRUPT_EXTERNAL_HARD);
1504 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1505 }
1506 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1507 {
1508 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1509 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1510 cpu_exit(env);
1511 }
1512 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1513 {
1514 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1515 ~CPU_INTERRUPT_EXTERNAL_DMA);
1516 remR3DmaRun(env);
1517 }
1518 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1519 {
1520 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1521 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1522 remR3TimersRun(env);
1523 }
1524 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1525 {
1526 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1527 ~CPU_INTERRUPT_EXTERNAL_HARD);
1528 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1529 }
1530}
1531
1532/* helper for recording call instruction addresses for later scanning */
1533void helper_record_call()
1534{
1535 if ( !(env->state & CPU_RAW_RING0)
1536 && (env->cr[0] & CR0_PG_MASK)
1537 && !(env->eflags & X86_EFL_IF))
1538 remR3RecordCall(env);
1539}
1540
1541#endif /* VBOX */
1542
1543/* real mode interrupt */
1544static void do_interrupt_real(int intno, int is_int, int error_code,
1545 unsigned int next_eip)
1546{
1547 SegmentCache *dt;
1548 target_ulong ptr, ssp;
1549 int selector;
1550 uint32_t offset, esp;
1551 uint32_t old_cs, old_eip;
1552
1553 /* real mode (simpler !) */
1554 dt = &env->idt;
1555#ifndef VBOX
1556 if (intno * 4 + 3 > dt->limit)
1557#else
1558 if ((unsigned)intno * 4 + 3 > dt->limit)
1559#endif
1560 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1561 ptr = dt->base + intno * 4;
1562 offset = lduw_kernel(ptr);
1563 selector = lduw_kernel(ptr + 2);
1564 esp = ESP;
1565 ssp = env->segs[R_SS].base;
1566 if (is_int)
1567 old_eip = next_eip;
1568 else
1569 old_eip = env->eip;
1570 old_cs = env->segs[R_CS].selector;
1571 /* XXX: use SS segment size ? */
1572 PUSHW(ssp, esp, 0xffff, compute_eflags());
1573 PUSHW(ssp, esp, 0xffff, old_cs);
1574 PUSHW(ssp, esp, 0xffff, old_eip);
1575
1576 /* update processor state */
1577 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1578 env->eip = offset;
1579 env->segs[R_CS].selector = selector;
1580 env->segs[R_CS].base = (selector << 4);
1581 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1582}
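/* Added note (illustrative): in real mode the IVT consists of 4-byte entries
 * (16-bit offset, then segment) read at idt.base + intno * 4, and only FLAGS,
 * CS and IP are pushed on the 16-bit stack before vectoring. */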
1583
1584/* fake user mode interrupt */
1585void do_interrupt_user(int intno, int is_int, int error_code,
1586 target_ulong next_eip)
1587{
1588 SegmentCache *dt;
1589 target_ulong ptr;
1590 int dpl, cpl, shift;
1591 uint32_t e2;
1592
1593 dt = &env->idt;
1594 if (env->hflags & HF_LMA_MASK) {
1595 shift = 4;
1596 } else {
1597 shift = 3;
1598 }
1599 ptr = dt->base + (intno << shift);
1600 e2 = ldl_kernel(ptr + 4);
1601
1602 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1603 cpl = env->hflags & HF_CPL_MASK;
1604 /* check privilege if software int */
1605 if (is_int && dpl < cpl)
1606 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1607
1608 /* Since we emulate only user space, we cannot do more than
1609 exiting the emulation with the suitable exception and error
1610 code */
1611 if (is_int)
1612 EIP = next_eip;
1613}
1614
1615#if !defined(CONFIG_USER_ONLY)
1616static void handle_even_inj(int intno, int is_int, int error_code,
1617 int is_hw, int rm)
1618{
1619 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1620 if (!(event_inj & SVM_EVTINJ_VALID)) {
1621 int type;
1622 if (is_int)
1623 type = SVM_EVTINJ_TYPE_SOFT;
1624 else
1625 type = SVM_EVTINJ_TYPE_EXEPT;
1626 event_inj = intno | type | SVM_EVTINJ_VALID;
1627 if (!rm && exeption_has_error_code(intno)) {
1628 event_inj |= SVM_EVTINJ_VALID_ERR;
1629 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1630 }
1631 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1632 }
1633}
1634#endif
1635
1636/*
1637 * Begin execution of an interruption. is_int is TRUE if coming from
1638 * the int instruction. next_eip is the EIP value AFTER the interrupt
1639 * instruction. It is only relevant if is_int is TRUE.
1640 */
1641void do_interrupt(int intno, int is_int, int error_code,
1642 target_ulong next_eip, int is_hw)
1643{
1644 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1645 if ((env->cr[0] & CR0_PE_MASK)) {
1646 static int count;
1647 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1648 count, intno, error_code, is_int,
1649 env->hflags & HF_CPL_MASK,
1650 env->segs[R_CS].selector, EIP,
1651 (int)env->segs[R_CS].base + EIP,
1652 env->segs[R_SS].selector, ESP);
1653 if (intno == 0x0e) {
1654 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1655 } else {
1656 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1657 }
1658 qemu_log("\n");
1659 log_cpu_state(env, X86_DUMP_CCOP);
1660#if 0
1661 {
1662 int i;
1663 uint8_t *ptr;
1664 qemu_log(" code=");
1665 ptr = env->segs[R_CS].base + env->eip;
1666 for(i = 0; i < 16; i++) {
1667 qemu_log(" %02x", ldub(ptr + i));
1668 }
1669 qemu_log("\n");
1670 }
1671#endif
1672 count++;
1673 }
1674 }
1675#ifdef VBOX
1676 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1677 if (is_int) {
1678 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1679 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1680 } else {
1681 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1682 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1683 }
1684 }
1685#endif
1686 if (env->cr[0] & CR0_PE_MASK) {
1687#if !defined(CONFIG_USER_ONLY)
1688 if (env->hflags & HF_SVMI_MASK)
1689 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1690#endif
1691#ifdef TARGET_X86_64
1692 if (env->hflags & HF_LMA_MASK) {
1693 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1694 } else
1695#endif
1696 {
1697#ifdef VBOX
1698 /* int xx *, v86 code and VME enabled? */
1699 if ( (env->eflags & VM_MASK)
1700 && (env->cr[4] & CR4_VME_MASK)
1701 && is_int
1702 && !is_hw
1703 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1704 )
1705 do_soft_interrupt_vme(intno, error_code, next_eip);
1706 else
1707#endif /* VBOX */
1708 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1709 }
1710 } else {
1711#if !defined(CONFIG_USER_ONLY)
1712 if (env->hflags & HF_SVMI_MASK)
1713 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1714#endif
1715 do_interrupt_real(intno, is_int, error_code, next_eip);
1716 }
1717
1718#if !defined(CONFIG_USER_ONLY)
1719 if (env->hflags & HF_SVMI_MASK) {
1720 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1721 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1722 }
1723#endif
1724}
1725
1726/* This should come from sysemu.h - if we could include it here... */
1727void qemu_system_reset_request(void);
1728
1729/*
1730 * Check nested exceptions and change to double or triple fault if
1731 * needed. It should only be called if this is not an interrupt.
1732 * Returns the new exception number.
1733 */
1734static int check_exception(int intno, int *error_code)
1735{
1736 int first_contributory = env->old_exception == 0 ||
1737 (env->old_exception >= 10 &&
1738 env->old_exception <= 13);
1739 int second_contributory = intno == 0 ||
1740 (intno >= 10 && intno <= 13);
1741
1742 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1743 env->old_exception, intno);
1744
1745#if !defined(CONFIG_USER_ONLY)
1746 if (env->old_exception == EXCP08_DBLE) {
1747 if (env->hflags & HF_SVMI_MASK)
1748 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1749
1750 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1751
1752# ifndef VBOX
1753 qemu_system_reset_request();
1754# else
1755 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1756# endif
1757 return EXCP_HLT;
1758 }
1759#endif
1760
1761 if ((first_contributory && second_contributory)
1762 || (env->old_exception == EXCP0E_PAGE &&
1763 (second_contributory || (intno == EXCP0E_PAGE)))) {
1764 intno = EXCP08_DBLE;
1765 *error_code = 0;
1766 }
1767
1768 if (second_contributory || (intno == EXCP0E_PAGE) ||
1769 (intno == EXCP08_DBLE))
1770 env->old_exception = intno;
1771
1772 return intno;
1773}
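/* Added example (illustrative): with the rules above, a #GP(13) raised while
 * delivering a #NP(11) (two contributory exceptions) is promoted to #DF(8),
 * and a further fault while delivering that double fault takes the
 * triple-fault/shutdown path handled at the top of the function. */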
1774
1775/*
1776 * Signal an interruption. It is executed in the main CPU loop.
1777 * is_int is TRUE if coming from the int instruction. next_eip is the
1778 * EIP value AFTER the interrupt instruction. It is only relevant if
1779 * is_int is TRUE.
1780 */
1781static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1782 int next_eip_addend)
1783{
1784#if defined(VBOX) && defined(DEBUG)
1785 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1786#endif
1787 if (!is_int) {
1788 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1789 intno = check_exception(intno, &error_code);
1790 } else {
1791 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1792 }
1793
1794 env->exception_index = intno;
1795 env->error_code = error_code;
1796 env->exception_is_int = is_int;
1797 env->exception_next_eip = env->eip + next_eip_addend;
1798 cpu_loop_exit();
1799}
1800
1801/* shortcuts to generate exceptions */
1802
1803void raise_exception_err(int exception_index, int error_code)
1804{
1805 raise_interrupt(exception_index, 0, error_code, 0);
1806}
1807
1808void raise_exception(int exception_index)
1809{
1810 raise_interrupt(exception_index, 0, 0, 0);
1811}
1812
1813void raise_exception_env(int exception_index, CPUState *nenv)
1814{
1815 env = nenv;
1816 raise_exception(exception_index);
1817}
1818/* SMM support */
1819
1820#if defined(CONFIG_USER_ONLY)
1821
1822void do_smm_enter(void)
1823{
1824}
1825
1826void helper_rsm(void)
1827{
1828}
1829
1830#else
1831
1832#ifdef TARGET_X86_64
1833#define SMM_REVISION_ID 0x00020064
1834#else
1835#define SMM_REVISION_ID 0x00020000
1836#endif
1837
1838void do_smm_enter(void)
1839{
1840 target_ulong sm_state;
1841 SegmentCache *dt;
1842 int i, offset;
1843
1844 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1845 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1846
1847 env->hflags |= HF_SMM_MASK;
1848 cpu_smm_update(env);
1849
1850 sm_state = env->smbase + 0x8000;
1851
1852#ifdef TARGET_X86_64
1853 for(i = 0; i < 6; i++) {
1854 dt = &env->segs[i];
1855 offset = 0x7e00 + i * 16;
1856 stw_phys(sm_state + offset, dt->selector);
1857 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1858 stl_phys(sm_state + offset + 4, dt->limit);
1859 stq_phys(sm_state + offset + 8, dt->base);
1860 }
1861
1862 stq_phys(sm_state + 0x7e68, env->gdt.base);
1863 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1864
1865 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1866 stq_phys(sm_state + 0x7e78, env->ldt.base);
1867 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1868 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1869
1870 stq_phys(sm_state + 0x7e88, env->idt.base);
1871 stl_phys(sm_state + 0x7e84, env->idt.limit);
1872
1873 stw_phys(sm_state + 0x7e90, env->tr.selector);
1874 stq_phys(sm_state + 0x7e98, env->tr.base);
1875 stl_phys(sm_state + 0x7e94, env->tr.limit);
1876 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1877
1878 stq_phys(sm_state + 0x7ed0, env->efer);
1879
1880 stq_phys(sm_state + 0x7ff8, EAX);
1881 stq_phys(sm_state + 0x7ff0, ECX);
1882 stq_phys(sm_state + 0x7fe8, EDX);
1883 stq_phys(sm_state + 0x7fe0, EBX);
1884 stq_phys(sm_state + 0x7fd8, ESP);
1885 stq_phys(sm_state + 0x7fd0, EBP);
1886 stq_phys(sm_state + 0x7fc8, ESI);
1887 stq_phys(sm_state + 0x7fc0, EDI);
1888 for(i = 8; i < 16; i++)
1889 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1890 stq_phys(sm_state + 0x7f78, env->eip);
1891 stl_phys(sm_state + 0x7f70, compute_eflags());
1892 stl_phys(sm_state + 0x7f68, env->dr[6]);
1893 stl_phys(sm_state + 0x7f60, env->dr[7]);
1894
1895 stl_phys(sm_state + 0x7f48, env->cr[4]);
1896 stl_phys(sm_state + 0x7f50, env->cr[3]);
1897 stl_phys(sm_state + 0x7f58, env->cr[0]);
1898
1899 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1900 stl_phys(sm_state + 0x7f00, env->smbase);
1901#else
1902 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1903 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1904 stl_phys(sm_state + 0x7ff4, compute_eflags());
1905 stl_phys(sm_state + 0x7ff0, env->eip);
1906 stl_phys(sm_state + 0x7fec, EDI);
1907 stl_phys(sm_state + 0x7fe8, ESI);
1908 stl_phys(sm_state + 0x7fe4, EBP);
1909 stl_phys(sm_state + 0x7fe0, ESP);
1910 stl_phys(sm_state + 0x7fdc, EBX);
1911 stl_phys(sm_state + 0x7fd8, EDX);
1912 stl_phys(sm_state + 0x7fd4, ECX);
1913 stl_phys(sm_state + 0x7fd0, EAX);
1914 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1915 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1916
1917 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1918 stl_phys(sm_state + 0x7f64, env->tr.base);
1919 stl_phys(sm_state + 0x7f60, env->tr.limit);
1920 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1921
1922 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1923 stl_phys(sm_state + 0x7f80, env->ldt.base);
1924 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1925 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1926
1927 stl_phys(sm_state + 0x7f74, env->gdt.base);
1928 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1929
1930 stl_phys(sm_state + 0x7f58, env->idt.base);
1931 stl_phys(sm_state + 0x7f54, env->idt.limit);
1932
1933 for(i = 0; i < 6; i++) {
1934 dt = &env->segs[i];
1935 if (i < 3)
1936 offset = 0x7f84 + i * 12;
1937 else
1938 offset = 0x7f2c + (i - 3) * 12;
1939 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1940 stl_phys(sm_state + offset + 8, dt->base);
1941 stl_phys(sm_state + offset + 4, dt->limit);
1942 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1943 }
1944 stl_phys(sm_state + 0x7f14, env->cr[4]);
1945
1946 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1947 stl_phys(sm_state + 0x7ef8, env->smbase);
1948#endif
1949 /* init SMM cpu state */
1950
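    /* Reset to the SMM start-up environment: EFER cleared (64-bit builds),
       flat 4GB segments with CS based at SMBASE, EIP = 0x8000, and paging/
       protection disabled in CR0. */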
1951#ifdef TARGET_X86_64
1952 cpu_load_efer(env, 0);
1953#endif
1954 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1955 env->eip = 0x00008000;
1956 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1957 0xffffffff, 0);
1958 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1959 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1960 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1961 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1962 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1963
1964 cpu_x86_update_cr0(env,
1965 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1966 cpu_x86_update_cr4(env, 0);
1967 env->dr[7] = 0x00000400;
1968 CC_OP = CC_OP_EFLAGS;
1969}
1970
1971void helper_rsm(void)
1972{
1973#ifdef VBOX
1974 cpu_abort(env, "helper_rsm");
1975#else /* !VBOX */
1976 target_ulong sm_state;
1977 int i, offset;
1978 uint32_t val;
1979
1980 sm_state = env->smbase + 0x8000;
1981#ifdef TARGET_X86_64
1982 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1983
1984 for(i = 0; i < 6; i++) {
1985 offset = 0x7e00 + i * 16;
1986 cpu_x86_load_seg_cache(env, i,
1987 lduw_phys(sm_state + offset),
1988 ldq_phys(sm_state + offset + 8),
1989 ldl_phys(sm_state + offset + 4),
1990 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1991 }
1992
1993 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1994 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1995
1996 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1997 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1998 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1999 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
2000#ifdef VBOX
2001 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2002 env->ldt.newselector = 0;
2003#endif
2004
2005 env->idt.base = ldq_phys(sm_state + 0x7e88);
2006 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2007
2008 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2009 env->tr.base = ldq_phys(sm_state + 0x7e98);
2010 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2011 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2012#ifdef VBOX
2013 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2014 env->tr.newselector = 0;
2015#endif
2016
2017 EAX = ldq_phys(sm_state + 0x7ff8);
2018 ECX = ldq_phys(sm_state + 0x7ff0);
2019 EDX = ldq_phys(sm_state + 0x7fe8);
2020 EBX = ldq_phys(sm_state + 0x7fe0);
2021 ESP = ldq_phys(sm_state + 0x7fd8);
2022 EBP = ldq_phys(sm_state + 0x7fd0);
2023 ESI = ldq_phys(sm_state + 0x7fc8);
2024 EDI = ldq_phys(sm_state + 0x7fc0);
2025 for(i = 8; i < 16; i++)
2026 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2027 env->eip = ldq_phys(sm_state + 0x7f78);
2028 load_eflags(ldl_phys(sm_state + 0x7f70),
2029 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2030 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2031 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2032
2033 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2034 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2035 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2036
2037 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2038 if (val & 0x20000) {
2039 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2040 }
2041#else
2042 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2043 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2044 load_eflags(ldl_phys(sm_state + 0x7ff4),
2045 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2046 env->eip = ldl_phys(sm_state + 0x7ff0);
2047 EDI = ldl_phys(sm_state + 0x7fec);
2048 ESI = ldl_phys(sm_state + 0x7fe8);
2049 EBP = ldl_phys(sm_state + 0x7fe4);
2050 ESP = ldl_phys(sm_state + 0x7fe0);
2051 EBX = ldl_phys(sm_state + 0x7fdc);
2052 EDX = ldl_phys(sm_state + 0x7fd8);
2053 ECX = ldl_phys(sm_state + 0x7fd4);
2054 EAX = ldl_phys(sm_state + 0x7fd0);
2055 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2056 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2057
2058 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2059 env->tr.base = ldl_phys(sm_state + 0x7f64);
2060 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2061 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2062#ifdef VBOX
2063 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2064 env->tr.newselector = 0;
2065#endif
2066
2067 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2068 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2069 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2070 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2071#ifdef VBOX
2072 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2073 env->ldt.newselector = 0;
2074#endif
2075
2076 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2077 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2078
2079 env->idt.base = ldl_phys(sm_state + 0x7f58);
2080 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2081
2082 for(i = 0; i < 6; i++) {
2083 if (i < 3)
2084 offset = 0x7f84 + i * 12;
2085 else
2086 offset = 0x7f2c + (i - 3) * 12;
2087 cpu_x86_load_seg_cache(env, i,
2088 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2089 ldl_phys(sm_state + offset + 8),
2090 ldl_phys(sm_state + offset + 4),
2091 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2092 }
2093 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2094
2095 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2096 if (val & 0x20000) {
2097 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2098 }
2099#endif
2100 CC_OP = CC_OP_EFLAGS;
2101 env->hflags &= ~HF_SMM_MASK;
2102 cpu_smm_update(env);
2103
2104 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2105 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2106#endif /* !VBOX */
2107}
2108
2109#endif /* !CONFIG_USER_ONLY */
2110
2111
2112/* division, flags are undefined */
2113
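/*
 * Both a zero divisor and a quotient that does not fit in the destination
 * register raise EXCP00_DIVZ (#DE), matching hardware behaviour.
 * Example (divb): AX = 0x0013 (19), divisor = 5 -> AL = 3, AH = 4.
 */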
2114void helper_divb_AL(target_ulong t0)
2115{
2116 unsigned int num, den, q, r;
2117
2118 num = (EAX & 0xffff);
2119 den = (t0 & 0xff);
2120 if (den == 0) {
2121 raise_exception(EXCP00_DIVZ);
2122 }
2123 q = (num / den);
2124 if (q > 0xff)
2125 raise_exception(EXCP00_DIVZ);
2126 q &= 0xff;
2127 r = (num % den) & 0xff;
2128 EAX = (EAX & ~0xffff) | (r << 8) | q;
2129}
2130
2131void helper_idivb_AL(target_ulong t0)
2132{
2133 int num, den, q, r;
2134
2135 num = (int16_t)EAX;
2136 den = (int8_t)t0;
2137 if (den == 0) {
2138 raise_exception(EXCP00_DIVZ);
2139 }
2140 q = (num / den);
2141 if (q != (int8_t)q)
2142 raise_exception(EXCP00_DIVZ);
2143 q &= 0xff;
2144 r = (num % den) & 0xff;
2145 EAX = (EAX & ~0xffff) | (r << 8) | q;
2146}
2147
2148void helper_divw_AX(target_ulong t0)
2149{
2150 unsigned int num, den, q, r;
2151
2152 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2153 den = (t0 & 0xffff);
2154 if (den == 0) {
2155 raise_exception(EXCP00_DIVZ);
2156 }
2157 q = (num / den);
2158 if (q > 0xffff)
2159 raise_exception(EXCP00_DIVZ);
2160 q &= 0xffff;
2161 r = (num % den) & 0xffff;
2162 EAX = (EAX & ~0xffff) | q;
2163 EDX = (EDX & ~0xffff) | r;
2164}
2165
2166void helper_idivw_AX(target_ulong t0)
2167{
2168 int num, den, q, r;
2169
2170 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2171 den = (int16_t)t0;
2172 if (den == 0) {
2173 raise_exception(EXCP00_DIVZ);
2174 }
2175 q = (num / den);
2176 if (q != (int16_t)q)
2177 raise_exception(EXCP00_DIVZ);
2178 q &= 0xffff;
2179 r = (num % den) & 0xffff;
2180 EAX = (EAX & ~0xffff) | q;
2181 EDX = (EDX & ~0xffff) | r;
2182}
2183
2184void helper_divl_EAX(target_ulong t0)
2185{
2186 unsigned int den, r;
2187 uint64_t num, q;
2188
2189 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2190 den = t0;
2191 if (den == 0) {
2192 raise_exception(EXCP00_DIVZ);
2193 }
2194 q = (num / den);
2195 r = (num % den);
2196 if (q > 0xffffffff)
2197 raise_exception(EXCP00_DIVZ);
2198 EAX = (uint32_t)q;
2199 EDX = (uint32_t)r;
2200}
2201
2202void helper_idivl_EAX(target_ulong t0)
2203{
2204 int den, r;
2205 int64_t num, q;
2206
2207 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2208 den = t0;
2209 if (den == 0) {
2210 raise_exception(EXCP00_DIVZ);
2211 }
2212 q = (num / den);
2213 r = (num % den);
2214 if (q != (int32_t)q)
2215 raise_exception(EXCP00_DIVZ);
2216 EAX = (uint32_t)q;
2217 EDX = (uint32_t)r;
2218}
2219
2220/* bcd */
2221
2222/* XXX: AAM with an immediate byte of 0 should raise #DE; that case is not handled here. */
2223void helper_aam(int base)
2224{
2225 int al, ah;
2226 al = EAX & 0xff;
2227 ah = al / base;
2228 al = al % base;
2229 EAX = (EAX & ~0xffff) | al | (ah << 8);
2230 CC_DST = al;
2231}
2232
2233void helper_aad(int base)
2234{
2235 int al, ah;
2236 al = EAX & 0xff;
2237 ah = (EAX >> 8) & 0xff;
2238 al = ((ah * base) + al) & 0xff;
2239 EAX = (EAX & ~0xffff) | al;
2240 CC_DST = al;
2241}
2242
2243void helper_aaa(void)
2244{
2245 int icarry;
2246 int al, ah, af;
2247 int eflags;
2248
2249 eflags = helper_cc_compute_all(CC_OP);
2250 af = eflags & CC_A;
2251 al = EAX & 0xff;
2252 ah = (EAX >> 8) & 0xff;
2253
2254 icarry = (al > 0xf9);
2255 if (((al & 0x0f) > 9 ) || af) {
2256 al = (al + 6) & 0x0f;
2257 ah = (ah + 1 + icarry) & 0xff;
2258 eflags |= CC_C | CC_A;
2259 } else {
2260 eflags &= ~(CC_C | CC_A);
2261 al &= 0x0f;
2262 }
2263 EAX = (EAX & ~0xffff) | al | (ah << 8);
2264 CC_SRC = eflags;
2265}
2266
2267void helper_aas(void)
2268{
2269 int icarry;
2270 int al, ah, af;
2271 int eflags;
2272
2273 eflags = helper_cc_compute_all(CC_OP);
2274 af = eflags & CC_A;
2275 al = EAX & 0xff;
2276 ah = (EAX >> 8) & 0xff;
2277
2278 icarry = (al < 6);
2279 if (((al & 0x0f) > 9 ) || af) {
2280 al = (al - 6) & 0x0f;
2281 ah = (ah - 1 - icarry) & 0xff;
2282 eflags |= CC_C | CC_A;
2283 } else {
2284 eflags &= ~(CC_C | CC_A);
2285 al &= 0x0f;
2286 }
2287 EAX = (EAX & ~0xffff) | al | (ah << 8);
2288 CC_SRC = eflags;
2289}
2290
2291void helper_daa(void)
2292{
2293 int al, af, cf;
2294 int eflags;
2295
2296 eflags = helper_cc_compute_all(CC_OP);
2297 cf = eflags & CC_C;
2298 af = eflags & CC_A;
2299 al = EAX & 0xff;
2300
2301 eflags = 0;
2302 if (((al & 0x0f) > 9 ) || af) {
2303 al = (al + 6) & 0xff;
2304 eflags |= CC_A;
2305 }
2306 if ((al > 0x9f) || cf) {
2307 al = (al + 0x60) & 0xff;
2308 eflags |= CC_C;
2309 }
2310 EAX = (EAX & ~0xff) | al;
2311 /* well, speed is not an issue here, so we compute the flags by hand */
2312 eflags |= (al == 0) << 6; /* zf */
2313 eflags |= parity_table[al]; /* pf */
2314 eflags |= (al & 0x80); /* sf */
2315 CC_SRC = eflags;
2316}
2317
2318void helper_das(void)
2319{
2320 int al, al1, af, cf;
2321 int eflags;
2322
2323 eflags = helper_cc_compute_all(CC_OP);
2324 cf = eflags & CC_C;
2325 af = eflags & CC_A;
2326 al = EAX & 0xff;
2327
2328 eflags = 0;
2329 al1 = al;
2330 if (((al & 0x0f) > 9 ) || af) {
2331 eflags |= CC_A;
2332 if (al < 6 || cf)
2333 eflags |= CC_C;
2334 al = (al - 6) & 0xff;
2335 }
2336 if ((al1 > 0x99) || cf) {
2337 al = (al - 0x60) & 0xff;
2338 eflags |= CC_C;
2339 }
2340 EAX = (EAX & ~0xff) | al;
2341 /* well, speed is not an issue here, so we compute the flags by hand */
2342 eflags |= (al == 0) << 6; /* zf */
2343 eflags |= parity_table[al]; /* pf */
2344 eflags |= (al & 0x80); /* sf */
2345 CC_SRC = eflags;
2346}
2347
2348void helper_into(int next_eip_addend)
2349{
2350 int eflags;
2351 eflags = helper_cc_compute_all(CC_OP);
2352 if (eflags & CC_O) {
2353 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2354 }
2355}
2356
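/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match
 * store ECX:EBX and set ZF, otherwise load the operand into EDX:EAX and
 * clear ZF. The memory operand is written back even on mismatch (see below).
 */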
2357void helper_cmpxchg8b(target_ulong a0)
2358{
2359 uint64_t d;
2360 int eflags;
2361
2362 eflags = helper_cc_compute_all(CC_OP);
2363 d = ldq(a0);
2364 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2365 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2366 eflags |= CC_Z;
2367 } else {
2368 /* always do the store */
2369 stq(a0, d);
2370 EDX = (uint32_t)(d >> 32);
2371 EAX = (uint32_t)d;
2372 eflags &= ~CC_Z;
2373 }
2374 CC_SRC = eflags;
2375}
2376
2377#ifdef TARGET_X86_64
2378void helper_cmpxchg16b(target_ulong a0)
2379{
2380 uint64_t d0, d1;
2381 int eflags;
2382
2383 if ((a0 & 0xf) != 0)
2384 raise_exception(EXCP0D_GPF);
2385 eflags = helper_cc_compute_all(CC_OP);
2386 d0 = ldq(a0);
2387 d1 = ldq(a0 + 8);
2388 if (d0 == EAX && d1 == EDX) {
2389 stq(a0, EBX);
2390 stq(a0 + 8, ECX);
2391 eflags |= CC_Z;
2392 } else {
2393 /* always do the store */
2394 stq(a0, d0);
2395 stq(a0 + 8, d1);
2396 EDX = d1;
2397 EAX = d0;
2398 eflags &= ~CC_Z;
2399 }
2400 CC_SRC = eflags;
2401}
2402#endif
2403
2404void helper_single_step(void)
2405{
2406#ifndef CONFIG_USER_ONLY
2407 check_hw_breakpoints(env, 1);
2408 env->dr[6] |= DR6_BS;
2409#endif
2410 raise_exception(EXCP01_DB);
2411}
2412
2413void helper_cpuid(void)
2414{
2415 uint32_t eax, ebx, ecx, edx;
2416
2417 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2418
2419 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2420 EAX = eax;
2421 EBX = ebx;
2422 ECX = ecx;
2423 EDX = edx;
2424}
2425
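/*
 * ENTER with a non-zero nesting level: copy level-1 frame pointers from the
 * old frame onto the new stack, then push the new frame pointer (t1).
 */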
2426void helper_enter_level(int level, int data32, target_ulong t1)
2427{
2428 target_ulong ssp;
2429 uint32_t esp_mask, esp, ebp;
2430
2431 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2432 ssp = env->segs[R_SS].base;
2433 ebp = EBP;
2434 esp = ESP;
2435 if (data32) {
2436 /* 32 bit */
2437 esp -= 4;
2438 while (--level) {
2439 esp -= 4;
2440 ebp -= 4;
2441 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2442 }
2443 esp -= 4;
2444 stl(ssp + (esp & esp_mask), t1);
2445 } else {
2446 /* 16 bit */
2447 esp -= 2;
2448 while (--level) {
2449 esp -= 2;
2450 ebp -= 2;
2451 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2452 }
2453 esp -= 2;
2454 stw(ssp + (esp & esp_mask), t1);
2455 }
2456}
2457
2458#ifdef TARGET_X86_64
2459void helper_enter64_level(int level, int data64, target_ulong t1)
2460{
2461 target_ulong esp, ebp;
2462 ebp = EBP;
2463 esp = ESP;
2464
2465 if (data64) {
2466 /* 64 bit */
2467 esp -= 8;
2468 while (--level) {
2469 esp -= 8;
2470 ebp -= 8;
2471 stq(esp, ldq(ebp));
2472 }
2473 esp -= 8;
2474 stq(esp, t1);
2475 } else {
2476 /* 16 bit */
2477 esp -= 2;
2478 while (--level) {
2479 esp -= 2;
2480 ebp -= 2;
2481 stw(esp, lduw(ebp));
2482 }
2483 esp -= 2;
2484 stw(esp, t1);
2485 }
2486}
2487#endif
2488
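/*
 * LLDT: a null selector leaves the LDT unusable; otherwise the selector must
 * reference a present LDT descriptor (type 2) in the GDT. In long mode the
 * descriptor is 16 bytes and supplies the upper 32 bits of the base.
 */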
2489void helper_lldt(int selector)
2490{
2491 SegmentCache *dt;
2492 uint32_t e1, e2;
2493#ifndef VBOX
2494 int index, entry_limit;
2495#else
2496 unsigned int index, entry_limit;
2497#endif
2498 target_ulong ptr;
2499
2500#ifdef VBOX
2501 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2502 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2503#endif
2504
2505 selector &= 0xffff;
2506 if ((selector & 0xfffc) == 0) {
2507 /* XXX: NULL selector case: invalid LDT */
2508 env->ldt.base = 0;
2509 env->ldt.limit = 0;
2510#ifdef VBOX
2511 env->ldt.flags = DESC_INTEL_UNUSABLE;
2512 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2513 env->ldt.newselector = 0;
2514#endif
2515 } else {
2516 if (selector & 0x4)
2517 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2518 dt = &env->gdt;
2519 index = selector & ~7;
2520#ifdef TARGET_X86_64
2521 if (env->hflags & HF_LMA_MASK)
2522 entry_limit = 15;
2523 else
2524#endif
2525 entry_limit = 7;
2526 if ((index + entry_limit) > dt->limit)
2527 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2528 ptr = dt->base + index;
2529 e1 = ldl_kernel(ptr);
2530 e2 = ldl_kernel(ptr + 4);
2531 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2532 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2533 if (!(e2 & DESC_P_MASK))
2534 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2535#ifdef TARGET_X86_64
2536 if (env->hflags & HF_LMA_MASK) {
2537 uint32_t e3;
2538 e3 = ldl_kernel(ptr + 8);
2539 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2540 env->ldt.base |= (target_ulong)e3 << 32;
2541 } else
2542#endif
2543 {
2544 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2545 }
2546 }
2547 env->ldt.selector = selector;
2548#ifdef VBOX
2549 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2550 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2551#endif
2552}
2553
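/*
 * LTR: the selector must reference a present, available 16-bit or 32-bit TSS
 * descriptor (type 1 or 9) in the GDT; the descriptor is marked busy here.
 * A null selector leaves TR unusable.
 */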
2554void helper_ltr(int selector)
2555{
2556 SegmentCache *dt;
2557 uint32_t e1, e2;
2558#ifndef VBOX
2559 int index, type, entry_limit;
2560#else
2561 unsigned int index;
2562 int type, entry_limit;
2563#endif
2564 target_ulong ptr;
2565
2566#ifdef VBOX
2567 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2568 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2569 env->tr.flags, (RTSEL)(selector & 0xffff)));
2570#endif
2571 selector &= 0xffff;
2572 if ((selector & 0xfffc) == 0) {
2573 /* NULL selector case: invalid TR */
2574 env->tr.base = 0;
2575 env->tr.limit = 0;
2576 env->tr.flags = 0;
2577#ifdef VBOX
2578 env->tr.flags = DESC_INTEL_UNUSABLE;
2579 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2580 env->tr.newselector = 0;
2581#endif
2582 } else {
2583 if (selector & 0x4)
2584 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2585 dt = &env->gdt;
2586 index = selector & ~7;
2587#ifdef TARGET_X86_64
2588 if (env->hflags & HF_LMA_MASK)
2589 entry_limit = 15;
2590 else
2591#endif
2592 entry_limit = 7;
2593 if ((index + entry_limit) > dt->limit)
2594 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2595 ptr = dt->base + index;
2596 e1 = ldl_kernel(ptr);
2597 e2 = ldl_kernel(ptr + 4);
2598 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2599 if ((e2 & DESC_S_MASK) ||
2600 (type != 1 && type != 9))
2601 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2602 if (!(e2 & DESC_P_MASK))
2603 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2604#ifdef TARGET_X86_64
2605 if (env->hflags & HF_LMA_MASK) {
2606 uint32_t e3, e4;
2607 e3 = ldl_kernel(ptr + 8);
2608 e4 = ldl_kernel(ptr + 12);
2609 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2610 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2611 load_seg_cache_raw_dt(&env->tr, e1, e2);
2612 env->tr.base |= (target_ulong)e3 << 32;
2613 } else
2614#endif
2615 {
2616 load_seg_cache_raw_dt(&env->tr, e1, e2);
2617 }
2618 e2 |= DESC_TSS_BUSY_MASK;
2619 stl_kernel(ptr + 4, e2);
2620 }
2621 env->tr.selector = selector;
2622#ifdef VBOX
2623 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2624 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2625 env->tr.flags, (RTSEL)(selector & 0xffff)));
2626#endif
2627}
2628
2629/* Only valid in protected mode outside VM86; seg_reg must not be R_CS. */
2630void helper_load_seg(int seg_reg, int selector)
2631{
2632 uint32_t e1, e2;
2633 int cpl, dpl, rpl;
2634 SegmentCache *dt;
2635#ifndef VBOX
2636 int index;
2637#else
2638 unsigned int index;
2639#endif
2640 target_ulong ptr;
2641
2642 selector &= 0xffff;
2643 cpl = env->hflags & HF_CPL_MASK;
2644#ifdef VBOX
2645
2646 /* Trying to load a selector with CPL=1? */
2647 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2648 {
2649 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2650 selector = selector & 0xfffc;
2651 }
2652#endif /* VBOX */
2653 if ((selector & 0xfffc) == 0) {
2654 /* null selector case */
2655#ifndef VBOX
2656 if (seg_reg == R_SS
2657#ifdef TARGET_X86_64
2658 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2659#endif
2660 )
2661 raise_exception_err(EXCP0D_GPF, 0);
2662 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2663#else
2664 if (seg_reg == R_SS) {
2665 if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2666 raise_exception_err(EXCP0D_GPF, 0);
2667 e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
2668 } else {
2669 e2 = DESC_INTEL_UNUSABLE;
2670 }
2671 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, e2);
2672#endif
2673 } else {
2674
2675 if (selector & 0x4)
2676 dt = &env->ldt;
2677 else
2678 dt = &env->gdt;
2679 index = selector & ~7;
2680 if ((index + 7) > dt->limit)
2681 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2682 ptr = dt->base + index;
2683 e1 = ldl_kernel(ptr);
2684 e2 = ldl_kernel(ptr + 4);
2685
2686 if (!(e2 & DESC_S_MASK))
2687 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2688 rpl = selector & 3;
2689 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2690 if (seg_reg == R_SS) {
2691 /* must be writable segment */
2692 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2693 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2694 if (rpl != cpl || dpl != cpl)
2695 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2696 } else {
2697 /* must be readable segment */
2698 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2699 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2700
2701 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2702 /* if not conforming code, test rights */
2703 if (dpl < cpl || dpl < rpl)
2704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2705 }
2706 }
2707
2708 if (!(e2 & DESC_P_MASK)) {
2709 if (seg_reg == R_SS)
2710 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2711 else
2712 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2713 }
2714
2715 /* set the access bit if not already set */
2716 if (!(e2 & DESC_A_MASK)) {
2717 e2 |= DESC_A_MASK;
2718 stl_kernel(ptr + 4, e2);
2719 }
2720
2721 cpu_x86_load_seg_cache(env, seg_reg, selector,
2722 get_seg_base(e1, e2),
2723 get_seg_limit(e1, e2),
2724 e2);
2725#if 0
2726 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2727 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2728#endif
2729 }
2730}
2731
2732/* protected mode far jump: direct jump to a code segment, or via a call gate, task gate or TSS */
2733void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2734 int next_eip_addend)
2735{
2736 int gate_cs, type;
2737 uint32_t e1, e2, cpl, dpl, rpl, limit;
2738 target_ulong next_eip;
2739
2740#ifdef VBOX /** @todo Why do we do this? */
2741 e1 = e2 = 0;
2742#endif
2743 if ((new_cs & 0xfffc) == 0)
2744 raise_exception_err(EXCP0D_GPF, 0);
2745 if (load_segment(&e1, &e2, new_cs) != 0)
2746 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2747 cpl = env->hflags & HF_CPL_MASK;
2748 if (e2 & DESC_S_MASK) {
2749 if (!(e2 & DESC_CS_MASK))
2750 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2751 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2752 if (e2 & DESC_C_MASK) {
2753 /* conforming code segment */
2754 if (dpl > cpl)
2755 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2756 } else {
2757 /* non conforming code segment */
2758 rpl = new_cs & 3;
2759 if (rpl > cpl)
2760 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2761 if (dpl != cpl)
2762 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2763 }
2764 if (!(e2 & DESC_P_MASK))
2765 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2766 limit = get_seg_limit(e1, e2);
2767 if (new_eip > limit &&
2768 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2769 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2770 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2771 get_seg_base(e1, e2), limit, e2);
2772 EIP = new_eip;
2773 } else {
2774 /* jump to call or task gate */
2775 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2776 rpl = new_cs & 3;
2777 cpl = env->hflags & HF_CPL_MASK;
2778 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2779 switch(type) {
2780 case 1: /* 286 TSS */
2781 case 9: /* 386 TSS */
2782 case 5: /* task gate */
2783 if (dpl < cpl || dpl < rpl)
2784 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2785 next_eip = env->eip + next_eip_addend;
2786 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2787 CC_OP = CC_OP_EFLAGS;
2788 break;
2789 case 4: /* 286 call gate */
2790 case 12: /* 386 call gate */
2791 if ((dpl < cpl) || (dpl < rpl))
2792 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2793 if (!(e2 & DESC_P_MASK))
2794 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2795 gate_cs = e1 >> 16;
2796 new_eip = (e1 & 0xffff);
2797 if (type == 12)
2798 new_eip |= (e2 & 0xffff0000);
2799 if (load_segment(&e1, &e2, gate_cs) != 0)
2800 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2801 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2802 /* must be code segment */
2803 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2804 (DESC_S_MASK | DESC_CS_MASK)))
2805 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2806 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2807 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2808 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2809 if (!(e2 & DESC_P_MASK))
2810#ifdef VBOX /* See page 3-514 of 253666.pdf */
2811 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2812#else
2813 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2814#endif
2815 limit = get_seg_limit(e1, e2);
2816 if (new_eip > limit)
2817 raise_exception_err(EXCP0D_GPF, 0);
2818 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2819 get_seg_base(e1, e2), limit, e2);
2820 EIP = new_eip;
2821 break;
2822 default:
2823 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2824 break;
2825 }
2826 }
2827}
2828
2829/* real mode call */
2830void helper_lcall_real(int new_cs, target_ulong new_eip1,
2831 int shift, int next_eip)
2832{
2833 int new_eip;
2834 uint32_t esp, esp_mask;
2835 target_ulong ssp;
2836
2837 new_eip = new_eip1;
2838 esp = ESP;
2839 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2840 ssp = env->segs[R_SS].base;
2841 if (shift) {
2842 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2843 PUSHL(ssp, esp, esp_mask, next_eip);
2844 } else {
2845 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2846 PUSHW(ssp, esp, esp_mask, next_eip);
2847 }
2848
2849 SET_ESP(esp, esp_mask);
2850 env->eip = new_eip;
2851 env->segs[R_CS].selector = new_cs;
2852 env->segs[R_CS].base = (new_cs << 4);
2853}
2854
2855/* protected mode call */
2856void helper_lcall_protected(int new_cs, target_ulong new_eip,
2857 int shift, int next_eip_addend)
2858{
2859 int new_stack, i;
2860 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2861 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2862 uint32_t val, limit, old_sp_mask;
2863 target_ulong ssp, old_ssp, next_eip;
2864
2865#ifdef VBOX /** @todo Why do we do this? */
2866 e1 = e2 = 0;
2867#endif
2868 next_eip = env->eip + next_eip_addend;
2869 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2870 LOG_PCALL_STATE(env);
2871 if ((new_cs & 0xfffc) == 0)
2872 raise_exception_err(EXCP0D_GPF, 0);
2873 if (load_segment(&e1, &e2, new_cs) != 0)
2874 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2875 cpl = env->hflags & HF_CPL_MASK;
2876 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2877 if (e2 & DESC_S_MASK) {
2878 if (!(e2 & DESC_CS_MASK))
2879 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2880 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2881 if (e2 & DESC_C_MASK) {
2882 /* conforming code segment */
2883 if (dpl > cpl)
2884 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2885 } else {
2886 /* non conforming code segment */
2887 rpl = new_cs & 3;
2888 if (rpl > cpl)
2889 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2890 if (dpl != cpl)
2891 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2892 }
2893 if (!(e2 & DESC_P_MASK))
2894 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2895
2896#ifdef TARGET_X86_64
2897 /* XXX: check 16/32 bit cases in long mode */
2898 if (shift == 2) {
2899 target_ulong rsp;
2900 /* 64 bit case */
2901 rsp = ESP;
2902 PUSHQ(rsp, env->segs[R_CS].selector);
2903 PUSHQ(rsp, next_eip);
2904 /* from this point, not restartable */
2905 ESP = rsp;
2906 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2907 get_seg_base(e1, e2),
2908 get_seg_limit(e1, e2), e2);
2909 EIP = new_eip;
2910 } else
2911#endif
2912 {
2913 sp = ESP;
2914 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2915 ssp = env->segs[R_SS].base;
2916 if (shift) {
2917 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2918 PUSHL(ssp, sp, sp_mask, next_eip);
2919 } else {
2920 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2921 PUSHW(ssp, sp, sp_mask, next_eip);
2922 }
2923
2924 limit = get_seg_limit(e1, e2);
2925 if (new_eip > limit)
2926 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2927 /* from this point, not restartable */
2928 SET_ESP(sp, sp_mask);
2929 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2930 get_seg_base(e1, e2), limit, e2);
2931 EIP = new_eip;
2932 }
2933 } else {
2934 /* check gate type */
2935 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2936 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2937 rpl = new_cs & 3;
2938 switch(type) {
2939 case 1: /* available 286 TSS */
2940 case 9: /* available 386 TSS */
2941 case 5: /* task gate */
2942 if (dpl < cpl || dpl < rpl)
2943 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2944 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2945 CC_OP = CC_OP_EFLAGS;
2946 return;
2947 case 4: /* 286 call gate */
2948 case 12: /* 386 call gate */
2949 break;
2950 default:
2951 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2952 break;
2953 }
2954 shift = type >> 3;
2955
2956 if (dpl < cpl || dpl < rpl)
2957 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2958 /* check valid bit */
2959 if (!(e2 & DESC_P_MASK))
2960 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2961 selector = e1 >> 16;
2962 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2963 param_count = e2 & 0x1f;
2964 if ((selector & 0xfffc) == 0)
2965 raise_exception_err(EXCP0D_GPF, 0);
2966
2967 if (load_segment(&e1, &e2, selector) != 0)
2968 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2969 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2970 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2971 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2972 if (dpl > cpl)
2973 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2974 if (!(e2 & DESC_P_MASK))
2975 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2976
2977 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2978 /* to inner privilege */
2979 get_ss_esp_from_tss(&ss, &sp, dpl);
2980 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2981 ss, sp, param_count, ESP);
2982 if ((ss & 0xfffc) == 0)
2983 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2984 if ((ss & 3) != dpl)
2985 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2986 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2987 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2988 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2989 if (ss_dpl != dpl)
2990 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2991 if (!(ss_e2 & DESC_S_MASK) ||
2992 (ss_e2 & DESC_CS_MASK) ||
2993 !(ss_e2 & DESC_W_MASK))
2994 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2995 if (!(ss_e2 & DESC_P_MASK))
2996#ifdef VBOX /* See page 3-99 of 253666.pdf */
2997 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2998#else
2999 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3000#endif
3001
3002 // push_size = ((param_count * 2) + 8) << shift;
3003
3004 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3005 old_ssp = env->segs[R_SS].base;
3006
3007 sp_mask = get_sp_mask(ss_e2);
3008 ssp = get_seg_base(ss_e1, ss_e2);
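            /* Push the caller's SS:ESP onto the new stack, then copy
               param_count parameters over from the old stack (call gate
               parameter copy). */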
3009 if (shift) {
3010 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3011 PUSHL(ssp, sp, sp_mask, ESP);
3012 for(i = param_count - 1; i >= 0; i--) {
3013 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3014 PUSHL(ssp, sp, sp_mask, val);
3015 }
3016 } else {
3017 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3018 PUSHW(ssp, sp, sp_mask, ESP);
3019 for(i = param_count - 1; i >= 0; i--) {
3020 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3021 PUSHW(ssp, sp, sp_mask, val);
3022 }
3023 }
3024 new_stack = 1;
3025 } else {
3026 /* to same privilege */
3027 sp = ESP;
3028 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3029 ssp = env->segs[R_SS].base;
3030 // push_size = (4 << shift);
3031 new_stack = 0;
3032 }
3033
3034 if (shift) {
3035 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3036 PUSHL(ssp, sp, sp_mask, next_eip);
3037 } else {
3038 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3039 PUSHW(ssp, sp, sp_mask, next_eip);
3040 }
3041
3042 /* from this point, not restartable */
3043
3044 if (new_stack) {
3045 ss = (ss & ~3) | dpl;
3046 cpu_x86_load_seg_cache(env, R_SS, ss,
3047 ssp,
3048 get_seg_limit(ss_e1, ss_e2),
3049 ss_e2);
3050 }
3051
3052 selector = (selector & ~3) | dpl;
3053 cpu_x86_load_seg_cache(env, R_CS, selector,
3054 get_seg_base(e1, e2),
3055 get_seg_limit(e1, e2),
3056 e2);
3057 cpu_x86_set_cpl(env, dpl);
3058 SET_ESP(sp, sp_mask);
3059 EIP = offset;
3060 }
3061}
3062
3063/* real and vm86 mode iret */
3064void helper_iret_real(int shift)
3065{
3066 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3067 target_ulong ssp;
3068 int eflags_mask;
3069#ifdef VBOX
3070 bool fVME = false;
3071
3072 remR3TrapClear(env->pVM);
3073#endif /* VBOX */
3074
3075 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3076 sp = ESP;
3077 ssp = env->segs[R_SS].base;
3078 if (shift == 1) {
3079 /* 32 bits */
3080 POPL(ssp, sp, sp_mask, new_eip);
3081 POPL(ssp, sp, sp_mask, new_cs);
3082 new_cs &= 0xffff;
3083 POPL(ssp, sp, sp_mask, new_eflags);
3084 } else {
3085 /* 16 bits */
3086 POPW(ssp, sp, sp_mask, new_eip);
3087 POPW(ssp, sp, sp_mask, new_cs);
3088 POPW(ssp, sp, sp_mask, new_eflags);
3089 }
3090#ifdef VBOX
3091 if ( (env->eflags & VM_MASK)
3092 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3093 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3094 {
3095 fVME = true;
3096 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3097 /* if TF will be set -> #GP */
3098 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3099 || (new_eflags & TF_MASK))
3100 raise_exception(EXCP0D_GPF);
3101 }
3102#endif /* VBOX */
3103 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3104 env->segs[R_CS].selector = new_cs;
3105 env->segs[R_CS].base = (new_cs << 4);
3106 env->eip = new_eip;
3107#ifdef VBOX
3108 if (fVME)
3109 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3110 else
3111#endif
3112 if (env->eflags & VM_MASK)
3113 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3114 else
3115 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3116 if (shift == 0)
3117 eflags_mask &= 0xffff;
3118 load_eflags(new_eflags, eflags_mask);
3119 env->hflags2 &= ~HF2_NMI_MASK;
3120#ifdef VBOX
3121 if (fVME)
3122 {
3123 if (new_eflags & IF_MASK)
3124 env->eflags |= VIF_MASK;
3125 else
3126 env->eflags &= ~VIF_MASK;
3127 }
3128#endif /* VBOX */
3129}
3130
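/*
 * After returning to an outer privilege level, nullify any data or
 * non-conforming code segment whose DPL is lower than the new CPL
 * (FS/GS holding a null selector are left untouched, see below).
 */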
3131static inline void validate_seg(int seg_reg, int cpl)
3132{
3133 int dpl;
3134 uint32_t e2;
3135
3136 /* XXX: on x86_64, we do not want to nullify FS and GS because
3137 they may still contain a valid base. I would be interested to
3138 know how a real x86_64 CPU behaves */
3139 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3140 (env->segs[seg_reg].selector & 0xfffc) == 0)
3141 return;
3142
3143 e2 = env->segs[seg_reg].flags;
3144 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3145 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3146 /* data or non conforming code segment */
3147 if (dpl < cpl) {
3148 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3149 }
3150 }
3151}
3152
3153/* protected mode lret/iret; is_iret selects IRET semantics (EFLAGS pop, VM86 return) */
3154static inline void helper_ret_protected(int shift, int is_iret, int addend)
3155{
3156 uint32_t new_cs, new_eflags, new_ss;
3157 uint32_t new_es, new_ds, new_fs, new_gs;
3158 uint32_t e1, e2, ss_e1, ss_e2;
3159 int cpl, dpl, rpl, eflags_mask, iopl;
3160 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3161
3162#ifdef VBOX /** @todo Why do we do this? */
3163 ss_e1 = ss_e2 = e1 = e2 = 0;
3164#endif
3165
3166#ifdef TARGET_X86_64
3167 if (shift == 2)
3168 sp_mask = -1;
3169 else
3170#endif
3171 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3172 sp = ESP;
3173 ssp = env->segs[R_SS].base;
3174 new_eflags = 0; /* avoid warning */
3175#ifdef TARGET_X86_64
3176 if (shift == 2) {
3177 POPQ(sp, new_eip);
3178 POPQ(sp, new_cs);
3179 new_cs &= 0xffff;
3180 if (is_iret) {
3181 POPQ(sp, new_eflags);
3182 }
3183 } else
3184#endif
3185 if (shift == 1) {
3186 /* 32 bits */
3187 POPL(ssp, sp, sp_mask, new_eip);
3188 POPL(ssp, sp, sp_mask, new_cs);
3189 new_cs &= 0xffff;
3190 if (is_iret) {
3191 POPL(ssp, sp, sp_mask, new_eflags);
3192#define LOG_GROUP LOG_GROUP_REM
3193#if defined(VBOX) && defined(DEBUG)
3194 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3195 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3196 Log(("iret: new EFLAGS %08X\n", new_eflags));
3197 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3198#endif
3199 if (new_eflags & VM_MASK)
3200 goto return_to_vm86;
3201 }
3202#ifdef VBOX
3203 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3204 {
3205 if ( !EMIsRawRing1Enabled(env->pVM)
3206 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3207 {
3208 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3209 new_cs = new_cs & 0xfffc;
3210 }
3211 else
3212 {
3213 /* Ugly assumption: assume a genuine switch to ring-1. */
3214 Log(("Genuine switch to ring-1 (iret)\n"));
3215 }
3216 }
3217 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3218 {
3219 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3220 new_cs = (new_cs & 0xfffc) | 1;
3221 }
3222#endif
3223 } else {
3224 /* 16 bits */
3225 POPW(ssp, sp, sp_mask, new_eip);
3226 POPW(ssp, sp, sp_mask, new_cs);
3227 if (is_iret)
3228 POPW(ssp, sp, sp_mask, new_eflags);
3229 }
3230 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3231 new_cs, new_eip, shift, addend);
3232 LOG_PCALL_STATE(env);
3233 if ((new_cs & 0xfffc) == 0)
3234 {
3235#if defined(VBOX) && defined(DEBUG)
3236 Log(("new_cs & 0xfffc) == 0\n"));
3237#endif
3238 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3239 }
3240 if (load_segment(&e1, &e2, new_cs) != 0)
3241 {
3242#if defined(VBOX) && defined(DEBUG)
3243 Log(("load_segment failed\n"));
3244#endif
3245 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3246 }
3247 if (!(e2 & DESC_S_MASK) ||
3248 !(e2 & DESC_CS_MASK))
3249 {
3250#if defined(VBOX) && defined(DEBUG)
3251 Log(("e2 mask %08x\n", e2));
3252#endif
3253 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3254 }
3255 cpl = env->hflags & HF_CPL_MASK;
3256 rpl = new_cs & 3;
3257 if (rpl < cpl)
3258 {
3259#if defined(VBOX) && defined(DEBUG)
3260 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3261#endif
3262 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3263 }
3264 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3265
3266 if (e2 & DESC_C_MASK) {
3267 if (dpl > rpl)
3268 {
3269#if defined(VBOX) && defined(DEBUG)
3270 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3271#endif
3272 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3273 }
3274 } else {
3275 if (dpl != rpl)
3276 {
3277#if defined(VBOX) && defined(DEBUG)
3278 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3279#endif
3280 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3281 }
3282 }
3283 if (!(e2 & DESC_P_MASK))
3284 {
3285#if defined(VBOX) && defined(DEBUG)
3286 Log(("DESC_P_MASK e2=%08x\n", e2));
3287#endif
3288 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3289 }
3290
3291 sp += addend;
3292 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3293 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3294 /* return to same privilege level */
3295#ifdef VBOX
3296 if (!(e2 & DESC_A_MASK))
3297 e2 = set_segment_accessed(new_cs, e2);
3298#endif
3299 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3300 get_seg_base(e1, e2),
3301 get_seg_limit(e1, e2),
3302 e2);
3303 } else {
3304 /* return to different privilege level */
3305#ifdef TARGET_X86_64
3306 if (shift == 2) {
3307 POPQ(sp, new_esp);
3308 POPQ(sp, new_ss);
3309 new_ss &= 0xffff;
3310 } else
3311#endif
3312 if (shift == 1) {
3313 /* 32 bits */
3314 POPL(ssp, sp, sp_mask, new_esp);
3315 POPL(ssp, sp, sp_mask, new_ss);
3316 new_ss &= 0xffff;
3317 } else {
3318 /* 16 bits */
3319 POPW(ssp, sp, sp_mask, new_esp);
3320 POPW(ssp, sp, sp_mask, new_ss);
3321 }
3322 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3323 new_ss, new_esp);
3324 if ((new_ss & 0xfffc) == 0) {
3325#ifdef TARGET_X86_64
3326 /* A null SS is allowed in long mode when not returning to ring 3 */
3327 /* XXX: test CS64 ? */
3328 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3329# ifdef VBOX
3330 if (!(e2 & DESC_A_MASK))
3331 e2 = set_segment_accessed(new_cs, e2);
3332# endif
3333 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3334 0, 0xffffffff,
3335 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3336 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3337 DESC_W_MASK | DESC_A_MASK);
3338 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3339 } else
3340#endif
3341 {
3342#if defined(VBOX) && defined(DEBUG)
3343 Log(("NULL ss, rpl=%d\n", rpl));
3344#endif
3345 raise_exception_err(EXCP0D_GPF, 0);
3346 }
3347 } else {
3348 if ((new_ss & 3) != rpl)
3349 {
3350#if defined(VBOX) && defined(DEBUG)
3351 Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
3352#endif
3353 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3354 }
3355 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3356 {
3357#if defined(VBOX) && defined(DEBUG)
3358 Log(("new_ss=%x load error\n", new_ss));
3359#endif
3360 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3361 }
3362 if (!(ss_e2 & DESC_S_MASK) ||
3363 (ss_e2 & DESC_CS_MASK) ||
3364 !(ss_e2 & DESC_W_MASK))
3365 {
3366#if defined(VBOX) && defined(DEBUG)
3367 Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
3368#endif
3369 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3370 }
3371 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3372 if (dpl != rpl)
3373 {
3374#if defined(VBOX) && defined(DEBUG)
3375 Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
3376#endif
3377 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3378 }
3379 if (!(ss_e2 & DESC_P_MASK))
3380 {
3381#if defined(VBOX) && defined(DEBUG)
3382 Log(("new_ss=%#x #NP\n", new_ss));
3383#endif
3384 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3385 }
3386#ifdef VBOX
3387 if (!(e2 & DESC_A_MASK))
3388 e2 = set_segment_accessed(new_cs, e2);
3389 if (!(ss_e2 & DESC_A_MASK))
3390 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3391#endif
3392 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3393 get_seg_base(ss_e1, ss_e2),
3394 get_seg_limit(ss_e1, ss_e2),
3395 ss_e2);
3396 }
3397
3398 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3399 get_seg_base(e1, e2),
3400 get_seg_limit(e1, e2),
3401 e2);
3402 cpu_x86_set_cpl(env, rpl);
3403 sp = new_esp;
3404#ifdef TARGET_X86_64
3405 if (env->hflags & HF_CS64_MASK)
3406 sp_mask = -1;
3407 else
3408#endif
3409 sp_mask = get_sp_mask(ss_e2);
3410
3411 /* validate data segments */
3412 validate_seg(R_ES, rpl);
3413 validate_seg(R_DS, rpl);
3414 validate_seg(R_FS, rpl);
3415 validate_seg(R_GS, rpl);
3416
3417 sp += addend;
3418 }
3419 SET_ESP(sp, sp_mask);
3420 env->eip = new_eip;
3421 if (is_iret) {
3422 /* NOTE: 'cpl' is the _old_ CPL */
3423 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3424 if (cpl == 0)
3425#ifdef VBOX
3426 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3427#else
3428 eflags_mask |= IOPL_MASK;
3429#endif
3430 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3431 if (cpl <= iopl)
3432 eflags_mask |= IF_MASK;
3433 if (shift == 0)
3434 eflags_mask &= 0xffff;
3435 load_eflags(new_eflags, eflags_mask);
3436 }
3437 return;
3438
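    /* IRET with VM=1 in the popped EFLAGS: pop ESP, SS, ES, DS, FS and GS
       from the 32-bit frame and resume in virtual-8086 mode at CPL 3. */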
3439 return_to_vm86:
3440 POPL(ssp, sp, sp_mask, new_esp);
3441 POPL(ssp, sp, sp_mask, new_ss);
3442 POPL(ssp, sp, sp_mask, new_es);
3443 POPL(ssp, sp, sp_mask, new_ds);
3444 POPL(ssp, sp, sp_mask, new_fs);
3445 POPL(ssp, sp, sp_mask, new_gs);
3446
3447 /* modify processor state */
3448 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3449 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3450 load_seg_vm(R_CS, new_cs & 0xffff);
3451 cpu_x86_set_cpl(env, 3);
3452 load_seg_vm(R_SS, new_ss & 0xffff);
3453 load_seg_vm(R_ES, new_es & 0xffff);
3454 load_seg_vm(R_DS, new_ds & 0xffff);
3455 load_seg_vm(R_FS, new_fs & 0xffff);
3456 load_seg_vm(R_GS, new_gs & 0xffff);
3457
3458 env->eip = new_eip & 0xffff;
3459 ESP = new_esp;
3460}
3461
3462void helper_iret_protected(int shift, int next_eip)
3463{
3464 int tss_selector, type;
3465 uint32_t e1, e2;
3466
3467#ifdef VBOX
3468 Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
3469 e1 = e2 = 0; /** @todo Why do we do this? */
3470 remR3TrapClear(env->pVM);
3471#endif
3472
3473 /* specific case for TSS */
3474 if (env->eflags & NT_MASK) {
3475#ifdef TARGET_X86_64
3476 if (env->hflags & HF_LMA_MASK)
3477 {
3478#if defined(VBOX) && defined(DEBUG)
3479 Log(("eflags.NT=1 on iret in long mode\n"));
3480#endif
3481 raise_exception_err(EXCP0D_GPF, 0);
3482 }
3483#endif
3484 tss_selector = lduw_kernel(env->tr.base + 0);
3485 if (tss_selector & 4)
3486 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3487 if (load_segment(&e1, &e2, tss_selector) != 0)
3488 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3489 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3490 /* NOTE: we check both segment and busy TSS */
3491 if (type != 3)
3492 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3493 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3494 } else {
3495 helper_ret_protected(shift, 1, 0);
3496 }
3497 env->hflags2 &= ~HF2_NMI_MASK;
3498}
3499
3500void helper_lret_protected(int shift, int addend)
3501{
3502 helper_ret_protected(shift, 0, addend);
3503}
3504
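/*
 * SYSENTER: switch to CPL 0 with flat segments derived from the
 * IA32_SYSENTER_CS MSR (CS = sysenter_cs, SS = sysenter_cs + 8) and continue
 * at sysenter_eip with ESP = sysenter_esp.
 */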
3505void helper_sysenter(void)
3506{
3507 if (env->sysenter_cs == 0) {
3508 raise_exception_err(EXCP0D_GPF, 0);
3509 }
3510 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3511 cpu_x86_set_cpl(env, 0);
3512
3513#ifdef TARGET_X86_64
3514 if (env->hflags & HF_LMA_MASK) {
3515 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3516 0, 0xffffffff,
3517 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3518 DESC_S_MASK |
3519 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3520 } else
3521#endif
3522 {
3523 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3524 0, 0xffffffff,
3525 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3526 DESC_S_MASK |
3527 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3528 }
3529 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3530 0, 0xffffffff,
3531 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3532 DESC_S_MASK |
3533 DESC_W_MASK | DESC_A_MASK);
3534 ESP = env->sysenter_esp;
3535 EIP = env->sysenter_eip;
3536}
3537
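/*
 * SYSEXIT: return to CPL 3; CS/SS are derived from sysenter_cs (+16/+24 for
 * a 32-bit return, +32/+40 for a 64-bit return), with EIP taken from EDX and
 * ESP from ECX.
 */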
3538void helper_sysexit(int dflag)
3539{
3540 int cpl;
3541
3542 cpl = env->hflags & HF_CPL_MASK;
3543 if (env->sysenter_cs == 0 || cpl != 0) {
3544 raise_exception_err(EXCP0D_GPF, 0);
3545 }
3546 cpu_x86_set_cpl(env, 3);
3547#ifdef TARGET_X86_64
3548 if (dflag == 2) {
3549 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3550 0, 0xffffffff,
3551 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3552 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3553 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3554 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3555 0, 0xffffffff,
3556 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3557 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3558 DESC_W_MASK | DESC_A_MASK);
3559 } else
3560#endif
3561 {
3562 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3563 0, 0xffffffff,
3564 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3565 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3566 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3567 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3568 0, 0xffffffff,
3569 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3570 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3571 DESC_W_MASK | DESC_A_MASK);
3572 }
3573 ESP = ECX;
3574 EIP = EDX;
3575}
3576
3577#if defined(CONFIG_USER_ONLY)
3578target_ulong helper_read_crN(int reg)
3579{
3580 return 0;
3581}
3582
3583void helper_write_crN(int reg, target_ulong t0)
3584{
3585}
3586
3587void helper_movl_drN_T0(int reg, target_ulong t0)
3588{
3589}
3590#else
3591target_ulong helper_read_crN(int reg)
3592{
3593 target_ulong val;
3594
3595 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3596 switch(reg) {
3597 default:
3598 val = env->cr[reg];
3599 break;
3600 case 8:
3601 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3602#ifndef VBOX
3603 val = cpu_get_apic_tpr(env->apic_state);
3604#else /* VBOX */
3605 val = cpu_get_apic_tpr(env);
3606#endif /* VBOX */
3607 } else {
3608 val = env->v_tpr;
3609 }
3610 break;
3611 }
3612 return val;
3613}
3614
3615void helper_write_crN(int reg, target_ulong t0)
3616{
3617 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3618 switch(reg) {
3619 case 0:
3620 cpu_x86_update_cr0(env, t0);
3621 break;
3622 case 3:
3623 cpu_x86_update_cr3(env, t0);
3624 break;
3625 case 4:
3626 cpu_x86_update_cr4(env, t0);
3627 break;
3628 case 8:
3629 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3630#ifndef VBOX
3631 cpu_set_apic_tpr(env->apic_state, t0);
3632#else /* VBOX */
3633 cpu_set_apic_tpr(env, t0);
3634#endif /* VBOX */
3635 }
3636 env->v_tpr = t0 & 0x0f;
3637 break;
3638 default:
3639 env->cr[reg] = t0;
3640 break;
3641 }
3642}
3643
3644void helper_movl_drN_T0(int reg, target_ulong t0)
3645{
3646 int i;
3647
3648 if (reg < 4) {
3649 hw_breakpoint_remove(env, reg);
3650 env->dr[reg] = t0;
3651 hw_breakpoint_insert(env, reg);
3652# ifndef VBOX
3653 } else if (reg == 7) {
3654# else
3655 } else if (reg == 7 || reg == 5) {
3656# endif
3657 for (i = 0; i < 4; i++)
3658 hw_breakpoint_remove(env, i);
3659 env->dr[7] = t0;
3660 for (i = 0; i < 4; i++)
3661 hw_breakpoint_insert(env, i);
3662 } else
3663# ifndef VBOX
3664 env->dr[reg] = t0;
3665# else
3666 env->dr[6] = (t0 & ~RT_BIT_32(12)) | UINT32_C(0xffff0ff0); /* 4 is an alias for 6. */
3667# endif
3668}
3669#endif
3670
3671void helper_lmsw(target_ulong t0)
3672{
3673 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3674 if already set to one. */
3675 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3676 helper_write_crN(0, t0);
3677}
3678
3679void helper_clts(void)
3680{
3681 env->cr[0] &= ~CR0_TS_MASK;
3682 env->hflags &= ~HF_TS_MASK;
3683}
3684
3685void helper_invlpg(target_ulong addr)
3686{
3687 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3688 tlb_flush_page(env, addr);
3689}
3690
3691void helper_rdtsc(void)
3692{
3693 uint64_t val;
3694
3695 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3696 raise_exception(EXCP0D_GPF);
3697 }
3698 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3699
3700 val = cpu_get_tsc(env) + env->tsc_offset;
3701 EAX = (uint32_t)(val);
3702 EDX = (uint32_t)(val >> 32);
3703}
3704
3705void helper_rdtscp(void)
3706{
3707 helper_rdtsc();
3708#ifndef VBOX
3709 ECX = (uint32_t)(env->tsc_aux);
3710#else /* VBOX */
3711 uint64_t val;
3712 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3713 ECX = (uint32_t)(val);
3714 else
3715 ECX = 0;
3716#endif /* VBOX */
3717}
3718
3719void helper_rdpmc(void)
3720{
3721#ifdef VBOX
3722 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3723 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3724 raise_exception(EXCP0D_GPF);
3725 }
3726 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3727 EAX = 0;
3728 EDX = 0;
3729#else /* !VBOX */
3730 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3731 raise_exception(EXCP0D_GPF);
3732 }
3733 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3734
3735 /* currently unimplemented */
3736 raise_exception_err(EXCP06_ILLOP, 0);
3737#endif /* !VBOX */
3738}
3739
3740#if defined(CONFIG_USER_ONLY)
3741void helper_wrmsr(void)
3742{
3743}
3744
3745void helper_rdmsr(void)
3746{
3747}
3748#else
3749void helper_wrmsr(void)
3750{
3751 uint64_t val;
3752
3753 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3754
3755 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3756
3757 switch((uint32_t)ECX) {
3758 case MSR_IA32_SYSENTER_CS:
3759 env->sysenter_cs = val & 0xffff;
3760 break;
3761 case MSR_IA32_SYSENTER_ESP:
3762 env->sysenter_esp = val;
3763 break;
3764 case MSR_IA32_SYSENTER_EIP:
3765 env->sysenter_eip = val;
3766 break;
3767 case MSR_IA32_APICBASE:
3768# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3769 cpu_set_apic_base(env->apic_state, val);
3770# endif
3771 break;
3772 case MSR_EFER:
3773 {
3774 uint64_t update_mask;
3775 update_mask = 0;
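            /* Only EFER bits backed by a supported CPUID feature may be
               changed by the guest; all other bits are preserved. */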
3776 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3777 update_mask |= MSR_EFER_SCE;
3778 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3779 update_mask |= MSR_EFER_LME;
3780 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3781 update_mask |= MSR_EFER_FFXSR;
3782 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3783 update_mask |= MSR_EFER_NXE;
3784 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3785 update_mask |= MSR_EFER_SVME;
3786 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3787 update_mask |= MSR_EFER_FFXSR;
3788 cpu_load_efer(env, (env->efer & ~update_mask) |
3789 (val & update_mask));
3790 }
3791 break;
3792 case MSR_STAR:
3793 env->star = val;
3794 break;
3795 case MSR_PAT:
3796 env->pat = val;
3797 break;
3798 case MSR_VM_HSAVE_PA:
3799 env->vm_hsave = val;
3800 break;
3801#ifdef TARGET_X86_64
3802 case MSR_LSTAR:
3803 env->lstar = val;
3804 break;
3805 case MSR_CSTAR:
3806 env->cstar = val;
3807 break;
3808 case MSR_FMASK:
3809 env->fmask = val;
3810 break;
3811 case MSR_FSBASE:
3812 env->segs[R_FS].base = val;
3813 break;
3814 case MSR_GSBASE:
3815 env->segs[R_GS].base = val;
3816 break;
3817 case MSR_KERNELGSBASE:
3818 env->kernelgsbase = val;
3819 break;
3820#endif
3821# ifndef VBOX
3822 case MSR_MTRRphysBase(0):
3823 case MSR_MTRRphysBase(1):
3824 case MSR_MTRRphysBase(2):
3825 case MSR_MTRRphysBase(3):
3826 case MSR_MTRRphysBase(4):
3827 case MSR_MTRRphysBase(5):
3828 case MSR_MTRRphysBase(6):
3829 case MSR_MTRRphysBase(7):
3830 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3831 break;
3832 case MSR_MTRRphysMask(0):
3833 case MSR_MTRRphysMask(1):
3834 case MSR_MTRRphysMask(2):
3835 case MSR_MTRRphysMask(3):
3836 case MSR_MTRRphysMask(4):
3837 case MSR_MTRRphysMask(5):
3838 case MSR_MTRRphysMask(6):
3839 case MSR_MTRRphysMask(7):
3840 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3841 break;
3842 case MSR_MTRRfix64K_00000:
3843 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3844 break;
3845 case MSR_MTRRfix16K_80000:
3846 case MSR_MTRRfix16K_A0000:
3847 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3848 break;
3849 case MSR_MTRRfix4K_C0000:
3850 case MSR_MTRRfix4K_C8000:
3851 case MSR_MTRRfix4K_D0000:
3852 case MSR_MTRRfix4K_D8000:
3853 case MSR_MTRRfix4K_E0000:
3854 case MSR_MTRRfix4K_E8000:
3855 case MSR_MTRRfix4K_F0000:
3856 case MSR_MTRRfix4K_F8000:
3857 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3858 break;
3859 case MSR_MTRRdefType:
3860 env->mtrr_deftype = val;
3861 break;
3862 case MSR_MCG_STATUS:
3863 env->mcg_status = val;
3864 break;
3865 case MSR_MCG_CTL:
3866 if ((env->mcg_cap & MCG_CTL_P)
3867 && (val == 0 || val == ~(uint64_t)0))
3868 env->mcg_ctl = val;
3869 break;
3870 case MSR_TSC_AUX:
3871 env->tsc_aux = val;
3872 break;
3873# endif /* !VBOX */
3874 default:
3875# ifndef VBOX
3876 if ((uint32_t)ECX >= MSR_MC0_CTL
3877 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3878 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3879 if ((offset & 0x3) != 0
3880 || (val == 0 || val == ~(uint64_t)0))
3881 env->mce_banks[offset] = val;
3882 break;
3883 }
3884 /* XXX: exception ? */
3885# endif
3886 break;
3887 }
3888
3889# ifdef VBOX
3890 /* call CPUM. */
3891 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3892 {
3893 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3894 }
3895# endif
3896}
3897
3898void helper_rdmsr(void)
3899{
3900 uint64_t val;
3901
3902 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3903
3904 switch((uint32_t)ECX) {
3905 case MSR_IA32_SYSENTER_CS:
3906 val = env->sysenter_cs;
3907 break;
3908 case MSR_IA32_SYSENTER_ESP:
3909 val = env->sysenter_esp;
3910 break;
3911 case MSR_IA32_SYSENTER_EIP:
3912 val = env->sysenter_eip;
3913 break;
3914 case MSR_IA32_APICBASE:
3915#ifndef VBOX
3916 val = cpu_get_apic_base(env->apic_state);
3917#else /* VBOX */
3918 val = cpu_get_apic_base(env);
3919#endif /* VBOX */
3920 break;
3921 case MSR_EFER:
3922 val = env->efer;
3923 break;
3924 case MSR_STAR:
3925 val = env->star;
3926 break;
3927 case MSR_PAT:
3928 val = env->pat;
3929 break;
3930 case MSR_VM_HSAVE_PA:
3931 val = env->vm_hsave;
3932 break;
3933# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3934 case MSR_IA32_PERF_STATUS:
3935 /* tsc_increment_by_tick */
3936 val = 1000ULL;
3937 /* CPU multiplier */
3938 val |= (((uint64_t)4ULL) << 40);
3939 break;
3940# endif /* !VBOX */
3941#ifdef TARGET_X86_64
3942 case MSR_LSTAR:
3943 val = env->lstar;
3944 break;
3945 case MSR_CSTAR:
3946 val = env->cstar;
3947 break;
3948 case MSR_FMASK:
3949 val = env->fmask;
3950 break;
3951 case MSR_FSBASE:
3952 val = env->segs[R_FS].base;
3953 break;
3954 case MSR_GSBASE:
3955 val = env->segs[R_GS].base;
3956 break;
3957 case MSR_KERNELGSBASE:
3958 val = env->kernelgsbase;
3959 break;
3960# ifndef VBOX
3961 case MSR_TSC_AUX:
3962 val = env->tsc_aux;
3963 break;
3964# endif /*!VBOX*/
3965#endif
3966# ifndef VBOX
3967 case MSR_MTRRphysBase(0):
3968 case MSR_MTRRphysBase(1):
3969 case MSR_MTRRphysBase(2):
3970 case MSR_MTRRphysBase(3):
3971 case MSR_MTRRphysBase(4):
3972 case MSR_MTRRphysBase(5):
3973 case MSR_MTRRphysBase(6):
3974 case MSR_MTRRphysBase(7):
3975 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3976 break;
3977 case MSR_MTRRphysMask(0):
3978 case MSR_MTRRphysMask(1):
3979 case MSR_MTRRphysMask(2):
3980 case MSR_MTRRphysMask(3):
3981 case MSR_MTRRphysMask(4):
3982 case MSR_MTRRphysMask(5):
3983 case MSR_MTRRphysMask(6):
3984 case MSR_MTRRphysMask(7):
3985 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3986 break;
3987 case MSR_MTRRfix64K_00000:
3988 val = env->mtrr_fixed[0];
3989 break;
3990 case MSR_MTRRfix16K_80000:
3991 case MSR_MTRRfix16K_A0000:
3992 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3993 break;
3994 case MSR_MTRRfix4K_C0000:
3995 case MSR_MTRRfix4K_C8000:
3996 case MSR_MTRRfix4K_D0000:
3997 case MSR_MTRRfix4K_D8000:
3998 case MSR_MTRRfix4K_E0000:
3999 case MSR_MTRRfix4K_E8000:
4000 case MSR_MTRRfix4K_F0000:
4001 case MSR_MTRRfix4K_F8000:
4002 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
4003 break;
4004 case MSR_MTRRdefType:
4005 val = env->mtrr_deftype;
4006 break;
4007 case MSR_MTRRcap:
4008 if (env->cpuid_features & CPUID_MTRR)
4009 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
4010 else
4011 /* XXX: exception ? */
4012 val = 0;
4013 break;
4014 case MSR_MCG_CAP:
4015 val = env->mcg_cap;
4016 break;
4017 case MSR_MCG_CTL:
4018 if (env->mcg_cap & MCG_CTL_P)
4019 val = env->mcg_ctl;
4020 else
4021 val = 0;
4022 break;
4023 case MSR_MCG_STATUS:
4024 val = env->mcg_status;
4025 break;
4026# endif /* !VBOX */
4027 default:
4028# ifndef VBOX
4029 if ((uint32_t)ECX >= MSR_MC0_CTL
4030 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
4031 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
4032 val = env->mce_banks[offset];
4033 break;
4034 }
4035 /* XXX: exception ? */
4036 val = 0;
4037# else /* VBOX */
4038 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4039 {
4040 /** @todo be a brave man and raise a \#GP(0) here as we should... */
4041 val = 0;
4042 }
4043# endif /* VBOX */
4044 break;
4045 }
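    /* RDMSR returns the 64-bit MSR value split across EDX:EAX (high:low). */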
4046 EAX = (uint32_t)(val);
4047 EDX = (uint32_t)(val >> 32);
4048
4049# ifdef VBOX_STRICT
4050 if ((uint32_t)ECX != MSR_IA32_TSC) {
4051 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4052 val = 0;
4053 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
4054 }
4055# endif
4056}
4057#endif
4058
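/* Selector-probing helpers (LSL, LAR, VERR, VERW): each validates the selector
   against the descriptor type and the current/requested privilege levels and
   reports the outcome through ZF (stored via CC_SRC); on failure ZF is cleared
   and lsl/lar return 0. */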
4059target_ulong helper_lsl(target_ulong selector1)
4060{
4061 unsigned int limit;
4062 uint32_t e1, e2, eflags, selector;
4063 int rpl, dpl, cpl, type;
4064
4065 selector = selector1 & 0xffff;
4066 eflags = helper_cc_compute_all(CC_OP);
4067 if ((selector & 0xfffc) == 0)
4068 goto fail;
4069 if (load_segment(&e1, &e2, selector) != 0)
4070 goto fail;
4071 rpl = selector & 3;
4072 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4073 cpl = env->hflags & HF_CPL_MASK;
4074 if (e2 & DESC_S_MASK) {
4075 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4076 /* conforming */
4077 } else {
4078 if (dpl < cpl || dpl < rpl)
4079 goto fail;
4080 }
4081 } else {
4082 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4083 switch(type) {
4084 case 1:
4085 case 2:
4086 case 3:
4087 case 9:
4088 case 11:
4089 break;
4090 default:
4091 goto fail;
4092 }
4093 if (dpl < cpl || dpl < rpl) {
4094 fail:
4095 CC_SRC = eflags & ~CC_Z;
4096 return 0;
4097 }
4098 }
4099 limit = get_seg_limit(e1, e2);
4100 CC_SRC = eflags | CC_Z;
4101 return limit;
4102}
4103
4104target_ulong helper_lar(target_ulong selector1)
4105{
4106 uint32_t e1, e2, eflags, selector;
4107 int rpl, dpl, cpl, type;
4108
4109 selector = selector1 & 0xffff;
4110 eflags = helper_cc_compute_all(CC_OP);
4111 if ((selector & 0xfffc) == 0)
4112 goto fail;
4113 if (load_segment(&e1, &e2, selector) != 0)
4114 goto fail;
4115 rpl = selector & 3;
4116 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4117 cpl = env->hflags & HF_CPL_MASK;
4118 if (e2 & DESC_S_MASK) {
4119 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4120 /* conforming */
4121 } else {
4122 if (dpl < cpl || dpl < rpl)
4123 goto fail;
4124 }
4125 } else {
4126 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4127 switch(type) {
4128 case 1:
4129 case 2:
4130 case 3:
4131 case 4:
4132 case 5:
4133 case 9:
4134 case 11:
4135 case 12:
4136 break;
4137 default:
4138 goto fail;
4139 }
4140 if (dpl < cpl || dpl < rpl) {
4141 fail:
4142 CC_SRC = eflags & ~CC_Z;
4143 return 0;
4144 }
4145 }
4146 CC_SRC = eflags | CC_Z;
4147 return e2 & 0x00f0ff00;
4148}
4149
4150void helper_verr(target_ulong selector1)
4151{
4152 uint32_t e1, e2, eflags, selector;
4153 int rpl, dpl, cpl;
4154
4155 selector = selector1 & 0xffff;
4156 eflags = helper_cc_compute_all(CC_OP);
4157 if ((selector & 0xfffc) == 0)
4158 goto fail;
4159 if (load_segment(&e1, &e2, selector) != 0)
4160 goto fail;
4161 if (!(e2 & DESC_S_MASK))
4162 goto fail;
4163 rpl = selector & 3;
4164 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4165 cpl = env->hflags & HF_CPL_MASK;
4166 if (e2 & DESC_CS_MASK) {
4167 if (!(e2 & DESC_R_MASK))
4168 goto fail;
4169 if (!(e2 & DESC_C_MASK)) {
4170 if (dpl < cpl || dpl < rpl)
4171 goto fail;
4172 }
4173 } else {
4174 if (dpl < cpl || dpl < rpl) {
4175 fail:
4176 CC_SRC = eflags & ~CC_Z;
4177 return;
4178 }
4179 }
4180 CC_SRC = eflags | CC_Z;
4181}
4182
4183void helper_verw(target_ulong selector1)
4184{
4185 uint32_t e1, e2, eflags, selector;
4186 int rpl, dpl, cpl;
4187
4188 selector = selector1 & 0xffff;
4189 eflags = helper_cc_compute_all(CC_OP);
4190 if ((selector & 0xfffc) == 0)
4191 goto fail;
4192 if (load_segment(&e1, &e2, selector) != 0)
4193 goto fail;
4194 if (!(e2 & DESC_S_MASK))
4195 goto fail;
4196 rpl = selector & 3;
4197 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4198 cpl = env->hflags & HF_CPL_MASK;
4199 if (e2 & DESC_CS_MASK) {
4200 goto fail;
4201 } else {
4202 if (dpl < cpl || dpl < rpl)
4203 goto fail;
4204 if (!(e2 & DESC_W_MASK)) {
4205 fail:
4206 CC_SRC = eflags & ~CC_Z;
4207 return;
4208 }
4209 }
4210 CC_SRC = eflags | CC_Z;
4211}
4212
4213/* x87 FPU helpers */
4214
4215static void fpu_set_exception(int mask)
4216{
4217 env->fpus |= mask;
4218 if (env->fpus & (~env->fpuc & FPUC_EM))
4219 env->fpus |= FPUS_SE | FPUS_B;
4220}
4221
4222static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4223{
4224 if (b == 0.0)
4225 fpu_set_exception(FPUS_ZE);
4226 return a / b;
4227}
4228
4229static void fpu_raise_exception(void)
4230{
4231 if (env->cr[0] & CR0_NE_MASK) {
4232 raise_exception(EXCP10_COPR);
4233 }
4234#if !defined(CONFIG_USER_ONLY)
4235 else {
4236 cpu_set_ferr(env);
4237 }
4238#endif
4239}
4240
4241void helper_flds_FT0(uint32_t val)
4242{
4243 union {
4244 float32 f;
4245 uint32_t i;
4246 } u;
4247 u.i = val;
4248 FT0 = float32_to_floatx(u.f, &env->fp_status);
4249}
4250
4251void helper_fldl_FT0(uint64_t val)
4252{
4253 union {
4254 float64 f;
4255 uint64_t i;
4256 } u;
4257 u.i = val;
4258 FT0 = float64_to_floatx(u.f, &env->fp_status);
4259}
4260
4261void helper_fildl_FT0(int32_t val)
4262{
4263 FT0 = int32_to_floatx(val, &env->fp_status);
4264}
4265
4266void helper_flds_ST0(uint32_t val)
4267{
4268 int new_fpstt;
4269 union {
4270 float32 f;
4271 uint32_t i;
4272 } u;
4273 new_fpstt = (env->fpstt - 1) & 7;
4274 u.i = val;
4275 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4276 env->fpstt = new_fpstt;
4277 env->fptags[new_fpstt] = 0; /* validate stack entry */
4278}
4279
4280void helper_fldl_ST0(uint64_t val)
4281{
4282 int new_fpstt;
4283 union {
4284 float64 f;
4285 uint64_t i;
4286 } u;
4287 new_fpstt = (env->fpstt - 1) & 7;
4288 u.i = val;
4289 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4290 env->fpstt = new_fpstt;
4291 env->fptags[new_fpstt] = 0; /* validate stack entry */
4292}
4293
4294void helper_fildl_ST0(int32_t val)
4295{
4296 int new_fpstt;
4297 new_fpstt = (env->fpstt - 1) & 7;
4298 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4299 env->fpstt = new_fpstt;
4300 env->fptags[new_fpstt] = 0; /* validate stack entry */
4301}
4302
4303void helper_fildll_ST0(int64_t val)
4304{
4305 int new_fpstt;
4306 new_fpstt = (env->fpstt - 1) & 7;
4307 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4308 env->fpstt = new_fpstt;
4309 env->fptags[new_fpstt] = 0; /* validate stack entry */
4310}
4311
4312#ifndef VBOX
4313uint32_t helper_fsts_ST0(void)
4314#else
4315RTCCUINTREG helper_fsts_ST0(void)
4316#endif
4317{
4318 union {
4319 float32 f;
4320 uint32_t i;
4321 } u;
4322 u.f = floatx_to_float32(ST0, &env->fp_status);
4323 return u.i;
4324}
4325
4326uint64_t helper_fstl_ST0(void)
4327{
4328 union {
4329 float64 f;
4330 uint64_t i;
4331 } u;
4332 u.f = floatx_to_float64(ST0, &env->fp_status);
4333 return u.i;
4334}
4335
4336#ifndef VBOX
4337int32_t helper_fist_ST0(void)
4338#else
4339RTCCINTREG helper_fist_ST0(void)
4340#endif
4341{
4342 int32_t val;
4343 val = floatx_to_int32(ST0, &env->fp_status);
4344 if (val != (int16_t)val)
4345 val = -32768;
4346 return val;
4347}
4348
4349#ifndef VBOX
4350int32_t helper_fistl_ST0(void)
4351#else
4352RTCCINTREG helper_fistl_ST0(void)
4353#endif
4354{
4355 int32_t val;
4356 val = floatx_to_int32(ST0, &env->fp_status);
4357 return val;
4358}
4359
4360int64_t helper_fistll_ST0(void)
4361{
4362 int64_t val;
4363 val = floatx_to_int64(ST0, &env->fp_status);
4364 return val;
4365}
4366
4367#ifndef VBOX
4368int32_t helper_fistt_ST0(void)
4369#else
4370RTCCINTREG helper_fistt_ST0(void)
4371#endif
4372{
4373 int32_t val;
4374 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4375 if (val != (int16_t)val)
4376 val = -32768;
4377 return val;
4378}
4379
4380#ifndef VBOX
4381int32_t helper_fisttl_ST0(void)
4382#else
4383RTCCINTREG helper_fisttl_ST0(void)
4384#endif
4385{
4386 int32_t val;
4387 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4388 return val;
4389}
4390
4391int64_t helper_fisttll_ST0(void)
4392{
4393 int64_t val;
4394 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4395 return val;
4396}
4397
4398void helper_fldt_ST0(target_ulong ptr)
4399{
4400 int new_fpstt;
4401 new_fpstt = (env->fpstt - 1) & 7;
4402 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4403 env->fpstt = new_fpstt;
4404 env->fptags[new_fpstt] = 0; /* validate stack entry */
4405}
4406
4407void helper_fstt_ST0(target_ulong ptr)
4408{
4409 helper_fstt(ST0, ptr);
4410}
4411
4412void helper_fpush(void)
4413{
4414 fpush();
4415}
4416
4417void helper_fpop(void)
4418{
4419 fpop();
4420}
4421
4422void helper_fdecstp(void)
4423{
4424 env->fpstt = (env->fpstt - 1) & 7;
4425 env->fpus &= (~0x4700);
4426}
4427
4428void helper_fincstp(void)
4429{
4430 env->fpstt = (env->fpstt + 1) & 7;
4431 env->fpus &= (~0x4700);
4432}
4433
4434/* FPU move */
4435
4436void helper_ffree_STN(int st_index)
4437{
4438 env->fptags[(env->fpstt + st_index) & 7] = 1;
4439}
4440
4441void helper_fmov_ST0_FT0(void)
4442{
4443 ST0 = FT0;
4444}
4445
4446void helper_fmov_FT0_STN(int st_index)
4447{
4448 FT0 = ST(st_index);
4449}
4450
4451void helper_fmov_ST0_STN(int st_index)
4452{
4453 ST0 = ST(st_index);
4454}
4455
4456void helper_fmov_STN_ST0(int st_index)
4457{
4458 ST(st_index) = ST0;
4459}
4460
4461void helper_fxchg_ST0_STN(int st_index)
4462{
4463 CPU86_LDouble tmp;
4464 tmp = ST(st_index);
4465 ST(st_index) = ST0;
4466 ST0 = tmp;
4467}
4468
4469/* FPU operations */
4470
4471static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4472
4473void helper_fcom_ST0_FT0(void)
4474{
4475 int ret;
4476
4477 ret = floatx_compare(ST0, FT0, &env->fp_status);
4478 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4479}
4480
4481void helper_fucom_ST0_FT0(void)
4482{
4483 int ret;
4484
4485 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4486 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4487}
4488
4489static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4490
4491void helper_fcomi_ST0_FT0(void)
4492{
4493 int eflags;
4494 int ret;
4495
4496 ret = floatx_compare(ST0, FT0, &env->fp_status);
4497 eflags = helper_cc_compute_all(CC_OP);
4498 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4499 CC_SRC = eflags;
4500}
4501
4502void helper_fucomi_ST0_FT0(void)
4503{
4504 int eflags;
4505 int ret;
4506
4507 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4508 eflags = helper_cc_compute_all(CC_OP);
4509 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4510 CC_SRC = eflags;
4511}
4512
4513void helper_fadd_ST0_FT0(void)
4514{
4515 ST0 += FT0;
4516}
4517
4518void helper_fmul_ST0_FT0(void)
4519{
4520 ST0 *= FT0;
4521}
4522
4523void helper_fsub_ST0_FT0(void)
4524{
4525 ST0 -= FT0;
4526}
4527
4528void helper_fsubr_ST0_FT0(void)
4529{
4530 ST0 = FT0 - ST0;
4531}
4532
4533void helper_fdiv_ST0_FT0(void)
4534{
4535 ST0 = helper_fdiv(ST0, FT0);
4536}
4537
4538void helper_fdivr_ST0_FT0(void)
4539{
4540 ST0 = helper_fdiv(FT0, ST0);
4541}
4542
4543/* fp operations between STN and ST0 */
4544
4545void helper_fadd_STN_ST0(int st_index)
4546{
4547 ST(st_index) += ST0;
4548}
4549
4550void helper_fmul_STN_ST0(int st_index)
4551{
4552 ST(st_index) *= ST0;
4553}
4554
4555void helper_fsub_STN_ST0(int st_index)
4556{
4557 ST(st_index) -= ST0;
4558}
4559
4560void helper_fsubr_STN_ST0(int st_index)
4561{
4562 CPU86_LDouble *p;
4563 p = &ST(st_index);
4564 *p = ST0 - *p;
4565}
4566
4567void helper_fdiv_STN_ST0(int st_index)
4568{
4569 CPU86_LDouble *p;
4570 p = &ST(st_index);
4571 *p = helper_fdiv(*p, ST0);
4572}
4573
4574void helper_fdivr_STN_ST0(int st_index)
4575{
4576 CPU86_LDouble *p;
4577 p = &ST(st_index);
4578 *p = helper_fdiv(ST0, *p);
4579}
4580
4581/* misc FPU operations */
4582void helper_fchs_ST0(void)
4583{
4584 ST0 = floatx_chs(ST0);
4585}
4586
4587void helper_fabs_ST0(void)
4588{
4589 ST0 = floatx_abs(ST0);
4590}
4591
4592void helper_fld1_ST0(void)
4593{
4594 ST0 = f15rk[1];
4595}
4596
4597void helper_fldl2t_ST0(void)
4598{
4599 ST0 = f15rk[6];
4600}
4601
4602void helper_fldl2e_ST0(void)
4603{
4604 ST0 = f15rk[5];
4605}
4606
4607void helper_fldpi_ST0(void)
4608{
4609 ST0 = f15rk[2];
4610}
4611
4612void helper_fldlg2_ST0(void)
4613{
4614 ST0 = f15rk[3];
4615}
4616
4617void helper_fldln2_ST0(void)
4618{
4619 ST0 = f15rk[4];
4620}
4621
4622void helper_fldz_ST0(void)
4623{
4624 ST0 = f15rk[0];
4625}
4626
4627void helper_fldz_FT0(void)
4628{
4629 FT0 = f15rk[0];
4630}
4631
4632#ifndef VBOX
4633uint32_t helper_fnstsw(void)
4634#else
4635RTCCUINTREG helper_fnstsw(void)
4636#endif
4637{
4638 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4639}
4640
4641#ifndef VBOX
4642uint32_t helper_fnstcw(void)
4643#else
4644RTCCUINTREG helper_fnstcw(void)
4645#endif
4646{
4647 return env->fpuc;
4648}
4649
4650static void update_fp_status(void)
4651{
4652 int rnd_type;
4653
4654 /* set rounding mode */
4655 switch(env->fpuc & RC_MASK) {
4656 default:
4657 case RC_NEAR:
4658 rnd_type = float_round_nearest_even;
4659 break;
4660 case RC_DOWN:
4661 rnd_type = float_round_down;
4662 break;
4663 case RC_UP:
4664 rnd_type = float_round_up;
4665 break;
4666 case RC_CHOP:
4667 rnd_type = float_round_to_zero;
4668 break;
4669 }
4670 set_float_rounding_mode(rnd_type, &env->fp_status);
4671#ifdef FLOATX80
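    /* Map the precision-control field (FPUC bits 8-9) to the softfloat rounding
       precision: 00 = 32-bit, 10 = 64-bit, 11 = 80-bit; reserved values fall back
       to 80-bit here. */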
4672 switch((env->fpuc >> 8) & 3) {
4673 case 0:
4674 rnd_type = 32;
4675 break;
4676 case 2:
4677 rnd_type = 64;
4678 break;
4679 case 3:
4680 default:
4681 rnd_type = 80;
4682 break;
4683 }
4684 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4685#endif
4686}
4687
4688void helper_fldcw(uint32_t val)
4689{
4690 env->fpuc = val;
4691 update_fp_status();
4692}
4693
4694void helper_fclex(void)
4695{
4696 env->fpus &= 0x7f00;
4697}
4698
4699void helper_fwait(void)
4700{
4701 if (env->fpus & FPUS_SE)
4702 fpu_raise_exception();
4703}
4704
4705void helper_fninit(void)
4706{
4707 env->fpus = 0;
4708 env->fpstt = 0;
4709 env->fpuc = 0x37f;
4710 env->fptags[0] = 1;
4711 env->fptags[1] = 1;
4712 env->fptags[2] = 1;
4713 env->fptags[3] = 1;
4714 env->fptags[4] = 1;
4715 env->fptags[5] = 1;
4716 env->fptags[6] = 1;
4717 env->fptags[7] = 1;
4718}
4719
4720/* BCD ops */
4721
4722void helper_fbld_ST0(target_ulong ptr)
4723{
4724 CPU86_LDouble tmp;
4725 uint64_t val;
4726 unsigned int v;
4727 int i;
4728
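    /* Packed BCD operand: bytes 0..8 hold 18 decimal digits, two per byte with the
       least significant byte first; bit 7 of byte 9 is the sign. */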
4729 val = 0;
4730 for(i = 8; i >= 0; i--) {
4731 v = ldub(ptr + i);
4732 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4733 }
4734 tmp = val;
4735 if (ldub(ptr + 9) & 0x80)
4736 tmp = -tmp;
4737 fpush();
4738 ST0 = tmp;
4739}
4740
4741void helper_fbst_ST0(target_ulong ptr)
4742{
4743 int v;
4744 target_ulong mem_ref, mem_end;
4745 int64_t val;
4746
4747 val = floatx_to_int64(ST0, &env->fp_status);
4748 mem_ref = ptr;
4749 mem_end = mem_ref + 9;
4750 if (val < 0) {
4751 stb(mem_end, 0x80);
4752 val = -val;
4753 } else {
4754 stb(mem_end, 0x00);
4755 }
4756 while (mem_ref < mem_end) {
4757 if (val == 0)
4758 break;
4759 v = val % 100;
4760 val = val / 100;
4761 v = ((v / 10) << 4) | (v % 10);
4762 stb(mem_ref++, v);
4763 }
4764 while (mem_ref < mem_end) {
4765 stb(mem_ref++, 0);
4766 }
4767}
4768
4769void helper_f2xm1(void)
4770{
4771 ST0 = pow(2.0,ST0) - 1.0;
4772}
4773
4774void helper_fyl2x(void)
4775{
4776 CPU86_LDouble fptemp;
4777
4778 fptemp = ST0;
4779 if (fptemp>0.0){
4780 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4781 ST1 *= fptemp;
4782 fpop();
4783 } else {
4784 env->fpus &= (~0x4700);
4785 env->fpus |= 0x400;
4786 }
4787}
4788
4789void helper_fptan(void)
4790{
4791 CPU86_LDouble fptemp;
4792
4793 fptemp = ST0;
4794 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4795 env->fpus |= 0x400;
4796 } else {
4797 ST0 = tan(fptemp);
4798 fpush();
4799 ST0 = 1.0;
4800 env->fpus &= (~0x400); /* C2 <-- 0 */
4801 /* the above code is for |arg| < 2**52 only */
4802 }
4803}
4804
4805void helper_fpatan(void)
4806{
4807 CPU86_LDouble fptemp, fpsrcop;
4808
4809 fpsrcop = ST1;
4810 fptemp = ST0;
4811 ST1 = atan2(fpsrcop,fptemp);
4812 fpop();
4813}
4814
4815void helper_fxtract(void)
4816{
4817 CPU86_LDoubleU temp;
4818 unsigned int expdif;
4819
4820 temp.d = ST0;
4821 expdif = EXPD(temp) - EXPBIAS;
4822 /*DP exponent bias*/
4823 ST0 = expdif;
4824 fpush();
4825 BIASEXPONENT(temp);
4826 ST0 = temp.d;
4827}
4828
4829void helper_fprem1(void)
4830{
4831 CPU86_LDouble dblq, fpsrcop, fptemp;
4832 CPU86_LDoubleU fpsrcop1, fptemp1;
4833 int expdif;
4834 signed long long int q;
4835
4836#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4837 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4838#else
4839 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4840#endif
4841 ST0 = 0.0 / 0.0; /* NaN */
4842 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4843 return;
4844 }
4845
4846 fpsrcop = ST0;
4847 fptemp = ST1;
4848 fpsrcop1.d = fpsrcop;
4849 fptemp1.d = fptemp;
4850 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4851
4852 if (expdif < 0) {
4853 /* optimisation? taken from the AMD docs */
4854 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4855 /* ST0 is unchanged */
4856 return;
4857 }
4858
4859 if (expdif < 53) {
4860 dblq = fpsrcop / fptemp;
4861 /* round dblq towards nearest integer */
4862 dblq = rint(dblq);
4863 ST0 = fpsrcop - fptemp * dblq;
4864
4865 /* convert dblq to q by truncating towards zero */
4866 if (dblq < 0.0)
4867 q = (signed long long int)(-dblq);
4868 else
4869 q = (signed long long int)dblq;
4870
4871 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4872 /* (C0,C3,C1) <-- (q2,q1,q0) */
4873 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4874 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4875 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4876 } else {
4877 env->fpus |= 0x400; /* C2 <-- 1 */
4878 fptemp = pow(2.0, expdif - 50);
4879 fpsrcop = (ST0 / ST1) / fptemp;
4880 /* fpsrcop = integer obtained by chopping */
4881 fpsrcop = (fpsrcop < 0.0) ?
4882 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4883 ST0 -= (ST1 * fpsrcop * fptemp);
4884 }
4885}
4886
4887void helper_fprem(void)
4888{
4889 CPU86_LDouble dblq, fpsrcop, fptemp;
4890 CPU86_LDoubleU fpsrcop1, fptemp1;
4891 int expdif;
4892 signed long long int q;
4893
4894#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4895 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4896#else
4897 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4898#endif
4899 ST0 = 0.0 / 0.0; /* NaN */
4900 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4901 return;
4902 }
4903
4904 fpsrcop = (CPU86_LDouble)ST0;
4905 fptemp = (CPU86_LDouble)ST1;
4906 fpsrcop1.d = fpsrcop;
4907 fptemp1.d = fptemp;
4908 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4909
4910 if (expdif < 0) {
4911 /* optimisation? taken from the AMD docs */
4912 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4913 /* ST0 is unchanged */
4914 return;
4915 }
4916
4917 if ( expdif < 53 ) {
4918 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4919 /* round dblq towards zero */
4920 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4921 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4922
4923 /* convert dblq to q by truncating towards zero */
4924 if (dblq < 0.0)
4925 q = (signed long long int)(-dblq);
4926 else
4927 q = (signed long long int)dblq;
4928
4929 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4930 /* (C0,C3,C1) <-- (q2,q1,q0) */
4931 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4932 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4933 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4934 } else {
4935 int N = 32 + (expdif % 32); /* as per AMD docs */
4936 env->fpus |= 0x400; /* C2 <-- 1 */
4937 fptemp = pow(2.0, (double)(expdif - N));
4938 fpsrcop = (ST0 / ST1) / fptemp;
4939 /* fpsrcop = integer obtained by chopping */
4940 fpsrcop = (fpsrcop < 0.0) ?
4941 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4942 ST0 -= (ST1 * fpsrcop * fptemp);
4943 }
4944}
4945
4946void helper_fyl2xp1(void)
4947{
4948 CPU86_LDouble fptemp;
4949
4950 fptemp = ST0;
4951 if ((fptemp+1.0)>0.0) {
4952 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4953 ST1 *= fptemp;
4954 fpop();
4955 } else {
4956 env->fpus &= (~0x4700);
4957 env->fpus |= 0x400;
4958 }
4959}
4960
4961void helper_fsqrt(void)
4962{
4963 CPU86_LDouble fptemp;
4964
4965 fptemp = ST0;
4966 if (fptemp<0.0) {
4967 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4968 env->fpus |= 0x400;
4969 }
4970 ST0 = sqrt(fptemp);
4971}
4972
4973void helper_fsincos(void)
4974{
4975 CPU86_LDouble fptemp;
4976
4977 fptemp = ST0;
4978 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4979 env->fpus |= 0x400;
4980 } else {
4981 ST0 = sin(fptemp);
4982 fpush();
4983 ST0 = cos(fptemp);
4984 env->fpus &= (~0x400); /* C2 <-- 0 */
4985 /* the above code is for |arg| < 2**63 only */
4986 }
4987}
4988
4989void helper_frndint(void)
4990{
4991 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4992}
4993
4994void helper_fscale(void)
4995{
4996 ST0 = ldexp (ST0, (int)(ST1));
4997}
4998
4999void helper_fsin(void)
5000{
5001 CPU86_LDouble fptemp;
5002
5003 fptemp = ST0;
5004 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5005 env->fpus |= 0x400;
5006 } else {
5007 ST0 = sin(fptemp);
5008 env->fpus &= (~0x400); /* C2 <-- 0 */
5009 /* the above code is for |arg| < 2**53 only */
5010 }
5011}
5012
5013void helper_fcos(void)
5014{
5015 CPU86_LDouble fptemp;
5016
5017 fptemp = ST0;
5018 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5019 env->fpus |= 0x400;
5020 } else {
5021 ST0 = cos(fptemp);
5022 env->fpus &= (~0x400); /* C2 <-- 0 */
5023 /* the above code is for |arg| < 2**63 only */
5024 }
5025}
5026
5027void helper_fxam_ST0(void)
5028{
5029 CPU86_LDoubleU temp;
5030 int expdif;
5031
5032 temp.d = ST0;
5033
5034 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5035 if (SIGND(temp))
5036 env->fpus |= 0x200; /* C1 <-- 1 */
5037
5038 /* XXX: test fptags too */
5039 expdif = EXPD(temp);
5040 if (expdif == MAXEXPD) {
5041#ifdef USE_X86LDOUBLE
5042 if (MANTD(temp) == 0x8000000000000000ULL)
5043#else
5044 if (MANTD(temp) == 0)
5045#endif
5046 env->fpus |= 0x500 /*Infinity*/;
5047 else
5048 env->fpus |= 0x100 /*NaN*/;
5049 } else if (expdif == 0) {
5050 if (MANTD(temp) == 0)
5051 env->fpus |= 0x4000 /*Zero*/;
5052 else
5053 env->fpus |= 0x4400 /*Denormal*/;
5054 } else {
5055 env->fpus |= 0x400;
5056 }
5057}
5058
5059void helper_fstenv(target_ulong ptr, int data32)
5060{
5061 int fpus, fptag, exp, i;
5062 uint64_t mant;
5063 CPU86_LDoubleU tmp;
5064
5065 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5066 fptag = 0;
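    /* Build the x87 tag word: two bits per register, 00 = valid, 01 = zero,
       10 = special (NaN/infinity/denormal), 11 = empty. */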
5067 for (i=7; i>=0; i--) {
5068 fptag <<= 2;
5069 if (env->fptags[i]) {
5070 fptag |= 3;
5071 } else {
5072 tmp.d = env->fpregs[i].d;
5073 exp = EXPD(tmp);
5074 mant = MANTD(tmp);
5075 if (exp == 0 && mant == 0) {
5076 /* zero */
5077 fptag |= 1;
5078 } else if (exp == 0 || exp == MAXEXPD
5079#ifdef USE_X86LDOUBLE
5080 || (mant & (1LL << 63)) == 0
5081#endif
5082 ) {
5083 /* NaNs, infinity, denormal */
5084 fptag |= 2;
5085 }
5086 }
5087 }
5088 if (data32) {
5089 /* 32 bit */
5090 stl(ptr, env->fpuc);
5091 stl(ptr + 4, fpus);
5092 stl(ptr + 8, fptag);
5093 stl(ptr + 12, 0); /* fpip */
5094 stl(ptr + 16, 0); /* fpcs */
5095 stl(ptr + 20, 0); /* fpoo */
5096 stl(ptr + 24, 0); /* fpos */
5097 } else {
5098 /* 16 bit */
5099 stw(ptr, env->fpuc);
5100 stw(ptr + 2, fpus);
5101 stw(ptr + 4, fptag);
5102 stw(ptr + 6, 0);
5103 stw(ptr + 8, 0);
5104 stw(ptr + 10, 0);
5105 stw(ptr + 12, 0);
5106 }
5107}
5108
5109void helper_fldenv(target_ulong ptr, int data32)
5110{
5111 int i, fpus, fptag;
5112
5113 if (data32) {
5114 env->fpuc = lduw(ptr);
5115 fpus = lduw(ptr + 4);
5116 fptag = lduw(ptr + 8);
5117 }
5118 else {
5119 env->fpuc = lduw(ptr);
5120 fpus = lduw(ptr + 2);
5121 fptag = lduw(ptr + 4);
5122 }
5123 env->fpstt = (fpus >> 11) & 7;
5124 env->fpus = fpus & ~0x3800;
5125 for(i = 0;i < 8; i++) {
5126 env->fptags[i] = ((fptag & 3) == 3);
5127 fptag >>= 2;
5128 }
5129}
5130
5131void helper_fsave(target_ulong ptr, int data32)
5132{
5133 CPU86_LDouble tmp;
5134 int i;
5135
5136 helper_fstenv(ptr, data32);
5137
5138 ptr += (14 << data32);
5139 for(i = 0;i < 8; i++) {
5140 tmp = ST(i);
5141 helper_fstt(tmp, ptr);
5142 ptr += 10;
5143 }
5144
5145 /* fninit */
5146 env->fpus = 0;
5147 env->fpstt = 0;
5148 env->fpuc = 0x37f;
5149 env->fptags[0] = 1;
5150 env->fptags[1] = 1;
5151 env->fptags[2] = 1;
5152 env->fptags[3] = 1;
5153 env->fptags[4] = 1;
5154 env->fptags[5] = 1;
5155 env->fptags[6] = 1;
5156 env->fptags[7] = 1;
5157}
5158
5159void helper_frstor(target_ulong ptr, int data32)
5160{
5161 CPU86_LDouble tmp;
5162 int i;
5163
5164 helper_fldenv(ptr, data32);
5165 ptr += (14 << data32);
5166
5167 for(i = 0;i < 8; i++) {
5168 tmp = helper_fldt(ptr);
5169 ST(i) = tmp;
5170 ptr += 10;
5171 }
5172}
5173
5174void helper_fxsave(target_ulong ptr, int data64)
5175{
5176 int fpus, fptag, i, nb_xmm_regs;
5177 CPU86_LDouble tmp;
5178 target_ulong addr;
5179
5180 /* The operand must be 16 byte aligned */
5181 if (ptr & 0xf) {
5182 raise_exception(EXCP0D_GPF);
5183 }
5184
5185 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5186 fptag = 0;
5187 for(i = 0; i < 8; i++) {
5188 fptag |= (env->fptags[i] << i);
5189 }
5190 stw(ptr, env->fpuc);
5191 stw(ptr + 2, fpus);
5192 stw(ptr + 4, fptag ^ 0xff);
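    /* FXSAVE stores an abridged tag word: one bit per register, 1 = valid.
       env->fptags uses 1 = empty, hence the XOR with 0xff above. */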
5193#ifdef TARGET_X86_64
5194 if (data64) {
5195 stq(ptr + 0x08, 0); /* rip */
5196 stq(ptr + 0x10, 0); /* rdp */
5197 } else
5198#endif
5199 {
5200 stl(ptr + 0x08, 0); /* eip */
5201 stl(ptr + 0x0c, 0); /* sel */
5202 stl(ptr + 0x10, 0); /* dp */
5203 stl(ptr + 0x14, 0); /* sel */
5204 }
5205
5206 addr = ptr + 0x20;
5207 for(i = 0;i < 8; i++) {
5208 tmp = ST(i);
5209 helper_fstt(tmp, addr);
5210 addr += 16;
5211 }
5212
5213 if (env->cr[4] & CR4_OSFXSR_MASK) {
5214 /* XXX: finish it */
5215 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5216 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5217 if (env->hflags & HF_CS64_MASK)
5218 nb_xmm_regs = 16;
5219 else
5220 nb_xmm_regs = 8;
5221 addr = ptr + 0xa0;
5222 /* Fast FXSAVE leaves out the XMM registers */
5223 if (!(env->efer & MSR_EFER_FFXSR)
5224 || (env->hflags & HF_CPL_MASK)
5225 || !(env->hflags & HF_LMA_MASK)) {
5226 for(i = 0; i < nb_xmm_regs; i++) {
5227 stq(addr, env->xmm_regs[i].XMM_Q(0));
5228 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5229 addr += 16;
5230 }
5231 }
5232 }
5233}
5234
5235void helper_fxrstor(target_ulong ptr, int data64)
5236{
5237 int i, fpus, fptag, nb_xmm_regs;
5238 CPU86_LDouble tmp;
5239 target_ulong addr;
5240
5241 /* The operand must be 16 byte aligned */
5242 if (ptr & 0xf) {
5243 raise_exception(EXCP0D_GPF);
5244 }
5245
5246 env->fpuc = lduw(ptr);
5247 fpus = lduw(ptr + 2);
5248 fptag = lduw(ptr + 4);
5249 env->fpstt = (fpus >> 11) & 7;
5250 env->fpus = fpus & ~0x3800;
5251 fptag ^= 0xff;
5252 for(i = 0;i < 8; i++) {
5253 env->fptags[i] = ((fptag >> i) & 1);
5254 }
5255
5256 addr = ptr + 0x20;
5257 for(i = 0;i < 8; i++) {
5258 tmp = helper_fldt(addr);
5259 ST(i) = tmp;
5260 addr += 16;
5261 }
5262
5263 if (env->cr[4] & CR4_OSFXSR_MASK) {
5264 /* XXX: finish it */
5265 env->mxcsr = ldl(ptr + 0x18);
5266 //ldl(ptr + 0x1c);
5267 if (env->hflags & HF_CS64_MASK)
5268 nb_xmm_regs = 16;
5269 else
5270 nb_xmm_regs = 8;
5271 addr = ptr + 0xa0;
5272 /* Fast FXRSTOR leaves out the XMM registers */
5273 if (!(env->efer & MSR_EFER_FFXSR)
5274 || (env->hflags & HF_CPL_MASK)
5275 || !(env->hflags & HF_LMA_MASK)) {
5276 for(i = 0; i < nb_xmm_regs; i++) {
5277#if !defined(VBOX) || __GNUC__ < 4
5278 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5279 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5280#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5281# if 1
5282 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5283 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5284 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5285 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5286# else
5287 /* this works fine on Mac OS X, gcc 4.0.1 */
5288 uint64_t u64 = ldq(addr);
5289 env->xmm_regs[i].XMM_Q(0) = u64;
5290 u64 = ldq(addr + 8);
5291 env->xmm_regs[i].XMM_Q(1) = u64;
5292# endif
5293#endif
5294 addr += 16;
5295 }
5296 }
5297 }
5298}
5299
5300#ifndef USE_X86LDOUBLE
5301
5302void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5303{
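    /* Convert a host 'double' (52-bit stored mantissa, bias 1023) to the 80-bit
       extended layout: shift the mantissa to the top, set the explicit integer bit,
       and rebias the exponent to 16383. */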
5304 CPU86_LDoubleU temp;
5305 int e;
5306
5307 temp.d = f;
5308 /* mantissa */
5309 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5310 /* exponent + sign */
5311 e = EXPD(temp) - EXPBIAS + 16383;
5312 e |= SIGND(temp) >> 16;
5313 *pexp = e;
5314}
5315
5316CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5317{
5318 CPU86_LDoubleU temp;
5319 int e;
5320 uint64_t ll;
5321
5322 /* XXX: handle overflow ? */
5323 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5324 e |= (upper >> 4) & 0x800; /* sign */
5325 ll = (mant >> 11) & ((1LL << 52) - 1);
5326#ifdef __arm__
5327 temp.l.upper = (e << 20) | (ll >> 32);
5328 temp.l.lower = ll;
5329#else
5330 temp.ll = ll | ((uint64_t)e << 52);
5331#endif
5332 return temp.d;
5333}
5334
5335#else
5336
5337void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5338{
5339 CPU86_LDoubleU temp;
5340
5341 temp.d = f;
5342 *pmant = temp.l.lower;
5343 *pexp = temp.l.upper;
5344}
5345
5346CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5347{
5348 CPU86_LDoubleU temp;
5349
5350 temp.l.upper = upper;
5351 temp.l.lower = mant;
5352 return temp.d;
5353}
5354#endif
5355
5356#ifdef TARGET_X86_64
5357
5358//#define DEBUG_MULDIV
5359
5360static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5361{
5362 *plow += a;
5363 /* carry test */
5364 if (*plow < a)
5365 (*phigh)++;
5366 *phigh += b;
5367}
5368
5369static void neg128(uint64_t *plow, uint64_t *phigh)
5370{
5371 *plow = ~ *plow;
5372 *phigh = ~ *phigh;
5373 add128(plow, phigh, 1, 0);
5374}
5375
5376/* return TRUE if overflow */
5377static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5378{
5379 uint64_t q, r, a1, a0;
5380 int i, qb, ab;
5381
5382 a0 = *plow;
5383 a1 = *phigh;
5384 if (a1 == 0) {
5385 q = a0 / b;
5386 r = a0 % b;
5387 *plow = q;
5388 *phigh = r;
5389 } else {
5390 if (a1 >= b)
5391 return 1;
5392 /* XXX: use a better algorithm */
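        /* Plain restoring division: shift the 128-bit dividend a1:a0 left one bit at
           a time, subtracting the divisor from a1 whenever it fits and collecting the
           quotient bits in a0; after 64 iterations a0 is the quotient, a1 the remainder. */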
5393 for(i = 0; i < 64; i++) {
5394 ab = a1 >> 63;
5395 a1 = (a1 << 1) | (a0 >> 63);
5396 if (ab || a1 >= b) {
5397 a1 -= b;
5398 qb = 1;
5399 } else {
5400 qb = 0;
5401 }
5402 a0 = (a0 << 1) | qb;
5403 }
5404#if defined(DEBUG_MULDIV)
5405 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5406 *phigh, *plow, b, a0, a1);
5407#endif
5408 *plow = a0;
5409 *phigh = a1;
5410 }
5411 return 0;
5412}
5413
5414/* return TRUE if overflow */
5415static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5416{
5417 int sa, sb;
5418 sa = ((int64_t)*phigh < 0);
5419 if (sa)
5420 neg128(plow, phigh);
5421 sb = (b < 0);
5422 if (sb)
5423 b = -b;
5424 if (div64(plow, phigh, b) != 0)
5425 return 1;
5426 if (sa ^ sb) {
5427 if (*plow > (1ULL << 63))
5428 return 1;
5429 *plow = - *plow;
5430 } else {
5431 if (*plow >= (1ULL << 63))
5432 return 1;
5433 }
5434 if (sa)
5435 *phigh = - *phigh;
5436 return 0;
5437}
5438
5439void helper_mulq_EAX_T0(target_ulong t0)
5440{
5441 uint64_t r0, r1;
5442
5443 mulu64(&r0, &r1, EAX, t0);
5444 EAX = r0;
5445 EDX = r1;
5446 CC_DST = r0;
5447 CC_SRC = r1;
5448}
5449
5450void helper_imulq_EAX_T0(target_ulong t0)
5451{
5452 uint64_t r0, r1;
5453
5454 muls64(&r0, &r1, EAX, t0);
5455 EAX = r0;
5456 EDX = r1;
5457 CC_DST = r0;
5458 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5459}
5460
5461target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5462{
5463 uint64_t r0, r1;
5464
5465 muls64(&r0, &r1, t0, t1);
5466 CC_DST = r0;
5467 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5468 return r0;
5469}
5470
5471void helper_divq_EAX(target_ulong t0)
5472{
5473 uint64_t r0, r1;
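    /* 64-bit DIV: RDX:RAX (the EDX/EAX macros here) forms the 128-bit dividend; the
       quotient goes to RAX and the remainder to RDX, with #DE raised on divide by
       zero or overflow. */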
5474 if (t0 == 0) {
5475 raise_exception(EXCP00_DIVZ);
5476 }
5477 r0 = EAX;
5478 r1 = EDX;
5479 if (div64(&r0, &r1, t0))
5480 raise_exception(EXCP00_DIVZ);
5481 EAX = r0;
5482 EDX = r1;
5483}
5484
5485void helper_idivq_EAX(target_ulong t0)
5486{
5487 uint64_t r0, r1;
5488 if (t0 == 0) {
5489 raise_exception(EXCP00_DIVZ);
5490 }
5491 r0 = EAX;
5492 r1 = EDX;
5493 if (idiv64(&r0, &r1, t0))
5494 raise_exception(EXCP00_DIVZ);
5495 EAX = r0;
5496 EDX = r1;
5497}
5498#endif
5499
5500static void do_hlt(void)
5501{
5502 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5503 env->halted = 1;
5504 env->exception_index = EXCP_HLT;
5505 cpu_loop_exit();
5506}
5507
5508void helper_hlt(int next_eip_addend)
5509{
5510 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5511 EIP += next_eip_addend;
5512
5513 do_hlt();
5514}
5515
5516void helper_monitor(target_ulong ptr)
5517{
5518#ifdef VBOX
5519 if ((uint32_t)ECX > 1)
5520 raise_exception(EXCP0D_GPF);
5521#else /* !VBOX */
5522 if ((uint32_t)ECX != 0)
5523 raise_exception(EXCP0D_GPF);
5524#endif /* !VBOX */
5525 /* XXX: store address ? */
5526 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5527}
5528
5529void helper_mwait(int next_eip_addend)
5530{
5531 if ((uint32_t)ECX != 0)
5532 raise_exception(EXCP0D_GPF);
5533#ifdef VBOX
5534 helper_hlt(next_eip_addend);
5535#else /* !VBOX */
5536 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5537 EIP += next_eip_addend;
5538
5539 /* XXX: not complete but not completely erroneous */
5540 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5541 /* more than one CPU: do not sleep because another CPU may
5542 wake this one */
5543 } else {
5544 do_hlt();
5545 }
5546#endif /* !VBOX */
5547}
5548
5549void helper_debug(void)
5550{
5551 env->exception_index = EXCP_DEBUG;
5552 cpu_loop_exit();
5553}
5554
5555void helper_reset_rf(void)
5556{
5557 env->eflags &= ~RF_MASK;
5558}
5559
5560void helper_raise_interrupt(int intno, int next_eip_addend)
5561{
5562 raise_interrupt(intno, 1, 0, next_eip_addend);
5563}
5564
5565void helper_raise_exception(int exception_index)
5566{
5567 raise_exception(exception_index);
5568}
5569
5570void helper_cli(void)
5571{
5572 env->eflags &= ~IF_MASK;
5573}
5574
5575void helper_sti(void)
5576{
5577 env->eflags |= IF_MASK;
5578}
5579
5580#ifdef VBOX
5581void helper_cli_vme(void)
5582{
5583 env->eflags &= ~VIF_MASK;
5584}
5585
5586void helper_sti_vme(void)
5587{
5588 /* First check, then change eflags according to the AMD manual */
5589 if (env->eflags & VIP_MASK) {
5590 raise_exception(EXCP0D_GPF);
5591 }
5592 env->eflags |= VIF_MASK;
5593}
5594#endif /* VBOX */
5595
5596#if 0
5597/* vm86plus instructions */
5598void helper_cli_vm(void)
5599{
5600 env->eflags &= ~VIF_MASK;
5601}
5602
5603void helper_sti_vm(void)
5604{
5605 env->eflags |= VIF_MASK;
5606 if (env->eflags & VIP_MASK) {
5607 raise_exception(EXCP0D_GPF);
5608 }
5609}
5610#endif
5611
5612void helper_set_inhibit_irq(void)
5613{
5614 env->hflags |= HF_INHIBIT_IRQ_MASK;
5615}
5616
5617void helper_reset_inhibit_irq(void)
5618{
5619 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5620}
5621
5622void helper_boundw(target_ulong a0, int v)
5623{
5624 int low, high;
5625 low = ldsw(a0);
5626 high = ldsw(a0 + 2);
5627 v = (int16_t)v;
5628 if (v < low || v > high) {
5629 raise_exception(EXCP05_BOUND);
5630 }
5631}
5632
5633void helper_boundl(target_ulong a0, int v)
5634{
5635 int low, high;
5636 low = ldl(a0);
5637 high = ldl(a0 + 4);
5638 if (v < low || v > high) {
5639 raise_exception(EXCP05_BOUND);
5640 }
5641}
5642
5643static float approx_rsqrt(float a)
5644{
5645 return 1.0 / sqrt(a);
5646}
5647
5648static float approx_rcp(float a)
5649{
5650 return 1.0 / a;
5651}
5652
5653#if !defined(CONFIG_USER_ONLY)
5654
5655#define MMUSUFFIX _mmu
5656
5657#define SHIFT 0
5658#include "softmmu_template.h"
5659
5660#define SHIFT 1
5661#include "softmmu_template.h"
5662
5663#define SHIFT 2
5664#include "softmmu_template.h"
5665
5666#define SHIFT 3
5667#include "softmmu_template.h"
5668
5669#endif
5670
5671#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5672/* This code assumes a real physical address always fits into a host CPU register,
5673 which is wrong in general, but true for our current use cases. */
5674RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5675{
5676 return remR3PhysReadS8(addr);
5677}
5678RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5679{
5680 return remR3PhysReadU8(addr);
5681}
5682void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5683{
5684 remR3PhysWriteU8(addr, val);
5685}
5686RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5687{
5688 return remR3PhysReadS16(addr);
5689}
5690RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5691{
5692 return remR3PhysReadU16(addr);
5693}
5694void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5695{
5696 remR3PhysWriteU16(addr, val);
5697}
5698RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5699{
5700 return remR3PhysReadS32(addr);
5701}
5702RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5703{
5704 return remR3PhysReadU32(addr);
5705}
5706void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5707{
5708 remR3PhysWriteU32(addr, val);
5709}
5710uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5711{
5712 return remR3PhysReadU64(addr);
5713}
5714void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5715{
5716 remR3PhysWriteU64(addr, val);
5717}
5718#endif /* VBOX */
5719
5720#if !defined(CONFIG_USER_ONLY)
5721/* try to fill the TLB and return an exception if error. If retaddr is
5722 NULL, it means that the function was called in C code (i.e. not
5723 from generated code or from helper.c) */
5724/* XXX: fix it to restore all registers */
5725void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5726{
5727 TranslationBlock *tb;
5728 int ret;
5729 uintptr_t pc;
5730 CPUX86State *saved_env;
5731
5732 /* XXX: hack to restore env in all cases, even if not called from
5733 generated code */
5734 saved_env = env;
5735 env = cpu_single_env;
5736
5737 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5738 if (ret) {
5739 if (retaddr) {
5740 /* now we have a real cpu fault */
5741 pc = (uintptr_t)retaddr;
5742 tb = tb_find_pc(pc);
5743 if (tb) {
5744 /* the PC is inside the translated code. It means that we have
5745 a virtual CPU fault */
5746 cpu_restore_state(tb, env, pc, NULL);
5747 }
5748 }
5749 raise_exception_err(env->exception_index, env->error_code);
5750 }
5751 env = saved_env;
5752}
5753#endif
5754
5755#ifdef VBOX
5756
5757/**
5758 * Correctly computes the eflags.
5759 * @returns eflags.
5760 * @param env1 CPU environment.
5761 */
5762uint32_t raw_compute_eflags(CPUX86State *env1)
5763{
5764 CPUX86State *savedenv = env;
5765 uint32_t efl;
5766 env = env1;
5767 efl = compute_eflags();
5768 env = savedenv;
5769 return efl;
5770}
5771
5772/**
5773 * Reads a byte from a virtual address in the guest memory area.
5774 * XXX: is it working for any addresses? swapped out pages?
5775 * @returns read data byte.
5776 * @param env1 CPU environment.
5777 * @param pvAddr GC Virtual address.
5778 */
5779uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5780{
5781 CPUX86State *savedenv = env;
5782 uint8_t u8;
5783 env = env1;
5784 u8 = ldub_kernel(addr);
5785 env = savedenv;
5786 return u8;
5787}
5788
5789/**
5790 * Reads a 16-bit word from a virtual address in the guest memory area.
5791 * XXX: is it working for any addresses? swapped out pages?
5792 * @returns read data word.
5793 * @param env1 CPU environment.
5794 * @param pvAddr GC Virtual address.
5795 */
5796uint16_t read_word(CPUX86State *env1, target_ulong addr)
5797{
5798 CPUX86State *savedenv = env;
5799 uint16_t u16;
5800 env = env1;
5801 u16 = lduw_kernel(addr);
5802 env = savedenv;
5803 return u16;
5804}
5805
5806/**
5807 * Reads a 32-bit dword from a virtual address in the guest memory area.
5808 * XXX: is it working for any addresses? swapped out pages?
5809 * @returns read data dword.
5810 * @param env1 CPU environment.
5811 * @param pvAddr GC Virtual address.
5812 */
5813uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5814{
5815 CPUX86State *savedenv = env;
5816 uint32_t u32;
5817 env = env1;
5818 u32 = ldl_kernel(addr);
5819 env = savedenv;
5820 return u32;
5821}
5822
5823/**
5824 * Writes a byte to a virtual address in the guest memory area.
5825 * XXX: is it working for any addresses? swapped out pages?
5827 * @param env1 CPU environment.
5828 * @param pvAddr GC Virtual address.
5829 * @param val byte value
5830 */
5831void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5832{
5833 CPUX86State *savedenv = env;
5834 env = env1;
5835 stb(addr, val);
5836 env = savedenv;
5837}
5838
5839void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5840{
5841 CPUX86State *savedenv = env;
5842 env = env1;
5843 stw(addr, val);
5844 env = savedenv;
5845}
5846
5847void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5848{
5849 CPUX86State *savedenv = env;
5850 env = env1;
5851 stl(addr, val);
5852 env = savedenv;
5853}
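/* The accessors above are meant for VBox code outside the recompiler that only has
   a CPUX86State pointer; they temporarily switch the global 'env' so the kernel-mode
   load/store helpers can be used. A hypothetical caller (names are illustrative only):
       uint32_t u32 = read_dword(pGuestEnv, GCPtrStack);
       write_dword(pGuestEnv, GCPtrStack, u32 + 1);
*/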
5854
5855/**
5856 * Correctly loads selector into segment register with updating internal
5857 * qemu data/caches.
5858 * @param env1 CPU environment.
5859 * @param seg_reg Segment register.
5860 * @param selector Selector to load.
5861 */
5862void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5863{
5864 CPUX86State *savedenv = env;
5865#ifdef FORCE_SEGMENT_SYNC
5866 jmp_buf old_buf;
5867#endif
5868
5869 env = env1;
5870
5871 if ( env->eflags & X86_EFL_VM
5872 || !(env->cr[0] & X86_CR0_PE))
5873 {
5874 load_seg_vm(seg_reg, selector);
5875
5876 env = savedenv;
5877
5878 /* Successful sync. */
5879 Assert(env1->segs[seg_reg].newselector == 0);
5880 }
5881 else
5882 {
5883 /* For some reason it works even without save/restore of the jump buffer, and as the code is
5884 time critical - let's not do that. */
5885#ifdef FORCE_SEGMENT_SYNC
5886 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5887#endif
5888 if (setjmp(env1->jmp_env) == 0)
5889 {
5890 if (seg_reg == R_CS)
5891 {
5892 uint32_t e1, e2;
5893 e1 = e2 = 0;
5894 load_segment(&e1, &e2, selector);
5895 cpu_x86_load_seg_cache(env, R_CS, selector,
5896 get_seg_base(e1, e2),
5897 get_seg_limit(e1, e2),
5898 e2);
5899 }
5900 else
5901 helper_load_seg(seg_reg, selector);
5902 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5903 loading 0 selectors, which in turn led to subtle problems like #3588. */
5904
5905 env = savedenv;
5906
5907 /* Successful sync. */
5908 Assert(env1->segs[seg_reg].newselector == 0);
5909 }
5910 else
5911 {
5912 env = savedenv;
5913
5914 /* Postpone sync until the guest uses the selector. */
5915 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5916 env1->segs[seg_reg].newselector = selector;
5917 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5918 env1->exception_index = -1;
5919 env1->error_code = 0;
5920 env1->old_exception = -1;
5921 }
5922#ifdef FORCE_SEGMENT_SYNC
5923 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5924#endif
5925 }
5926
5927}
5928
5929DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5930{
5931 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5932}
5933
5934
5935int emulate_single_instr(CPUX86State *env1)
5936{
5937 TranslationBlock *tb;
5938 TranslationBlock *current;
5939 int flags;
5940 uint8_t *tc_ptr;
5941 target_ulong old_eip;
5942
5943 /* ensures env is loaded! */
5944 CPUX86State *savedenv = env;
5945 env = env1;
5946
5947 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5948
5949 current = env->current_tb;
5950 env->current_tb = NULL;
5951 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5952
5953 /*
5954 * Translate only one instruction.
5955 */
5956 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5957 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5958 env->segs[R_CS].base, flags, 0);
5959
5960 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5961
5962
5963 /* tb_link_phys: */
5964 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5965 tb->jmp_next[0] = NULL;
5966 tb->jmp_next[1] = NULL;
5967 Assert(tb->jmp_next[0] == NULL);
5968 Assert(tb->jmp_next[1] == NULL);
5969 if (tb->tb_next_offset[0] != 0xffff)
5970 tb_reset_jump(tb, 0);
5971 if (tb->tb_next_offset[1] != 0xffff)
5972 tb_reset_jump(tb, 1);
5973
5974 /*
5975 * Execute it using emulation
5976 */
5977 old_eip = env->eip;
5978 env->current_tb = tb;
5979
5980 /*
5981 * EIP remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
5982 * Perhaps not a very safe hack.
5983 */
5984 while (old_eip == env->eip)
5985 {
5986 tc_ptr = tb->tc_ptr;
5987
5988#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5989 int fake_ret;
5990 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5991#else
5992 tcg_qemu_tb_exec(tc_ptr);
5993#endif
5994
5995 /*
5996 * Exit once we detect an external interrupt and interrupts are enabled
5997 */
5998 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
5999 || ( (env->eflags & IF_MASK)
6000 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
6001 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
6002 )
6003 {
6004 break;
6005 }
6006 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
6007 tlb_flush(env, true);
6008 }
6009 }
6010 env->current_tb = current;
6011
6012 tb_phys_invalidate(tb, -1);
6013 tb_free(tb);
6014/*
6015 Assert(tb->tb_next_offset[0] == 0xffff);
6016 Assert(tb->tb_next_offset[1] == 0xffff);
6017 Assert(tb->tb_next[0] == 0xffff);
6018 Assert(tb->tb_next[1] == 0xffff);
6019 Assert(tb->jmp_next[0] == NULL);
6020 Assert(tb->jmp_next[1] == NULL);
6021 Assert(tb->jmp_first == NULL); */
6022
6023 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
6024
6025 /*
6026 * Execute the next instruction when we encounter instruction fusing.
6027 */
6028 if (env->hflags & HF_INHIBIT_IRQ_MASK)
6029 {
6030 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
6031 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6032 emulate_single_instr(env);
6033 }
6034
6035 env = savedenv;
6036 return 0;
6037}
6038
6039/**
6040 * Correctly loads a new ldtr selector.
6041 *
6042 * @param env1 CPU environment.
6043 * @param selector Selector to load.
6044 */
6045void sync_ldtr(CPUX86State *env1, int selector)
6046{
6047 CPUX86State *saved_env = env;
6048 if (setjmp(env1->jmp_env) == 0)
6049 {
6050 env = env1;
6051 helper_lldt(selector);
6052 env = saved_env;
6053 }
6054 else
6055 {
6056 env = saved_env;
6057#ifdef VBOX_STRICT
6058 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6059#endif
6060 }
6061}
6062
6063int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6064 uint32_t *esp_ptr, int dpl)
6065{
6066 int type, index, shift;
6067
6068 CPUX86State *savedenv = env;
6069 env = env1;
6070
6071 if (!(env->tr.flags & DESC_P_MASK))
6072 cpu_abort(env, "invalid tss");
6073 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6074 if ((type & 7) != 1)
6075 cpu_abort(env, "invalid tss type %d", type);
6076 shift = type >> 3;
6077 index = (dpl * 4 + 2) << shift;
6078 if (index + (4 << shift) - 1 > env->tr.limit)
6079 {
6080 env = savedenv;
6081 return 0;
6082 }
6083 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6084
6085 if (shift == 0) {
6086 *esp_ptr = lduw_kernel(env->tr.base + index);
6087 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6088 } else {
6089 *esp_ptr = ldl_kernel(env->tr.base + index);
6090 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6091 }
6092
6093 env = savedenv;
6094 return 1;
6095}
6096
6097//*****************************************************************************
6098// Needs to be at the bottom of the file (overriding macros)
6099
6100static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6101{
6102#ifdef USE_X86LDOUBLE
6103 CPU86_LDoubleU tmp;
6104 tmp.l.lower = *(uint64_t const *)ptr;
6105 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6106 return tmp.d;
6107#else
6108# error "Busted FPU saving/restoring!"
6109 return *(CPU86_LDouble *)ptr;
6110#endif
6111}
6112
6113static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6114{
6115#ifdef USE_X86LDOUBLE
6116 CPU86_LDoubleU tmp;
6117 tmp.d = f;
6118 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6119 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6120 *(uint16_t *)(ptr + 10) = 0;
6121 *(uint32_t *)(ptr + 12) = 0;
6122 AssertCompile(sizeof(long double) > 8);
6123#else
6124# error "Busted FPU saving/restoring!"
6125 *(CPU86_LDouble *)ptr = f;
6126#endif
6127}
6128
6129#undef stw
6130#undef stl
6131#undef stq
6132#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6133#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6134#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6135
6136//*****************************************************************************
6137void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6138{
6139 int fpus, fptag, i, nb_xmm_regs;
6140 CPU86_LDouble tmp;
6141 uint8_t *addr;
6142 int data64 = !!(env->hflags & HF_LMA_MASK);
6143
6144 if (env->cpuid_features & CPUID_FXSR)
6145 {
6146 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6147 fptag = 0;
6148 for(i = 0; i < 8; i++) {
6149 fptag |= (env->fptags[i] << i);
6150 }
6151 stw(ptr, env->fpuc);
6152 stw(ptr + 2, fpus);
6153 stw(ptr + 4, fptag ^ 0xff);
6154
6155 addr = ptr + 0x20;
6156 for(i = 0;i < 8; i++) {
6157 tmp = ST(i);
6158 helper_fstt_raw(tmp, addr);
6159 addr += 16;
6160 }
6161
6162 if (env->cr[4] & CR4_OSFXSR_MASK) {
6163 /* XXX: finish it */
6164 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6165 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6166 nb_xmm_regs = 8 << data64;
6167 addr = ptr + 0xa0;
6168 for(i = 0; i < nb_xmm_regs; i++) {
6169#if __GNUC__ < 4
6170 stq(addr, env->xmm_regs[i].XMM_Q(0));
6171 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6172#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6173 stl(addr, env->xmm_regs[i].XMM_L(0));
6174 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6175 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6176 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6177#endif
6178 addr += 16;
6179 }
6180 }
6181 }
6182 else
6183 {
6184 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6185 int fptag;
6186
6187 fp->FCW = env->fpuc;
6188 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6189 fptag = 0;
6190 for (i=7; i>=0; i--) {
6191 fptag <<= 2;
6192 if (env->fptags[i]) {
6193 fptag |= 3;
6194 } else {
6195 /* the FPU automatically computes it */
6196 }
6197 }
6198 fp->FTW = fptag;
6199
6200 for(i = 0;i < 8; i++) {
6201 tmp = ST(i);
6202 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6203 }
6204 }
6205}
6206
6207//*****************************************************************************
6208#undef lduw
6209#undef ldl
6210#undef ldq
6211#define lduw(a) *(uint16_t *)(a)
6212#define ldl(a) *(uint32_t *)(a)
6213#define ldq(a) *(uint64_t *)(a)
6214//*****************************************************************************
6215void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6216{
6217 int i, fpus, fptag, nb_xmm_regs;
6218 CPU86_LDouble tmp;
6219 uint8_t *addr;
6220 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6221
6222 if (env->cpuid_features & CPUID_FXSR)
6223 {
6224 env->fpuc = lduw(ptr);
6225 fpus = lduw(ptr + 2);
6226 fptag = lduw(ptr + 4);
6227 env->fpstt = (fpus >> 11) & 7;
6228 env->fpus = fpus & ~0x3800;
6229 fptag ^= 0xff;
6230 for(i = 0;i < 8; i++) {
6231 env->fptags[i] = ((fptag >> i) & 1);
6232 }
6233
6234 addr = ptr + 0x20;
6235 for(i = 0;i < 8; i++) {
6236 tmp = helper_fldt_raw(addr);
6237 ST(i) = tmp;
6238 addr += 16;
6239 }
6240
6241 if (env->cr[4] & CR4_OSFXSR_MASK) {
6242 /* XXX: finish it, endianness */
6243 env->mxcsr = ldl(ptr + 0x18);
6244 //ldl(ptr + 0x1c);
6245 nb_xmm_regs = 8 << data64;
6246 addr = ptr + 0xa0;
6247 for(i = 0; i < nb_xmm_regs; i++) {
6248#if HC_ARCH_BITS == 32
6249 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6250 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6251 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6252 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6253 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6254#else
6255 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6256 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6257#endif
6258 addr += 16;
6259 }
6260 }
6261 }
6262 else
6263 {
6264 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6265 int fptag, j;
6266
6267 env->fpuc = fp->FCW;
6268 env->fpstt = (fp->FSW >> 11) & 7;
6269 env->fpus = fp->FSW & ~0x3800;
6270 fptag = fp->FTW;
6271 for(i = 0;i < 8; i++) {
6272 env->fptags[i] = ((fptag & 3) == 3);
6273 fptag >>= 2;
6274 }
6275 j = env->fpstt;
6276 for(i = 0;i < 8; i++) {
6277 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6278 ST(i) = tmp;
6279 }
6280 }
6281}
6282//*****************************************************************************
6283//*****************************************************************************
6284
6285#endif /* VBOX */
6286
6287/* Secure Virtual Machine helpers */
6288
6289#if defined(CONFIG_USER_ONLY)
6290
6291void helper_vmrun(int aflag, int next_eip_addend)
6292{
6293}
6294void helper_vmmcall(void)
6295{
6296}
6297void helper_vmload(int aflag)
6298{
6299}
6300void helper_vmsave(int aflag)
6301{
6302}
6303void helper_stgi(void)
6304{
6305}
6306void helper_clgi(void)
6307{
6308}
6309void helper_skinit(void)
6310{
6311}
6312void helper_invlpga(int aflag)
6313{
6314}
6315void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6316{
6317}
6318void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6319{
6320}
6321
6322void helper_svm_check_io(uint32_t port, uint32_t param,
6323 uint32_t next_eip_addend)
6324{
6325}
6326#else
6327
6328static inline void svm_save_seg(target_phys_addr_t addr,
6329 const SegmentCache *sc)
6330{
6331 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6332 sc->selector);
6333 stq_phys(addr + offsetof(struct vmcb_seg, base),
6334 sc->base);
6335 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6336 sc->limit);
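 /* VMCB attrib format: bits 0..7 are descriptor attribute bits 8..15 (type,
    S, DPL, P) and bits 8..11 are descriptor bits 20..23 (AVL, L, D/B, G). */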
6337 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6338 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6339}
6340
6341static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6342{
6343 unsigned int flags;
6344
6345 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6346 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6347 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6348 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6349 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6350}
6351
6352static inline void svm_load_seg_cache(target_phys_addr_t addr,
6353 CPUState *env, int seg_reg)
6354{
6355 SegmentCache sc1, *sc = &sc1;
6356 svm_load_seg(addr, sc);
6357 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6358 sc->base, sc->limit, sc->flags);
6359}
6360
6361void helper_vmrun(int aflag, int next_eip_addend)
6362{
6363 target_ulong addr;
6364 uint32_t event_inj;
6365 uint32_t int_ctl;
6366
6367 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6368
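 /* aflag encodes the effective address size (2 = 64-bit); rAX holds the VMCB
    physical address and is truncated to 32 bits otherwise. */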
6369 if (aflag == 2)
6370 addr = EAX;
6371 else
6372 addr = (uint32_t)EAX;
6373
6374 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6375
6376 env->vm_vmcb = addr;
6377
6378 /* save the current CPU state in the hsave page */
6379 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6380 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6381
6382 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6383 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6384
6385 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6386 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6387 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6388 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6389 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6390 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6391
6392 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6393 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6394
6395 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6396 &env->segs[R_ES]);
6397 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6398 &env->segs[R_CS]);
6399 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6400 &env->segs[R_SS]);
6401 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6402 &env->segs[R_DS]);
6403
6404 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6405 EIP + next_eip_addend);
6406 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6407 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6408
6409 /* load the interception bitmaps so we do not need to access the
6410 vmcb in svm mode */
6411 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6412 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6413 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6414 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6415 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6416 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6417
6418 /* enable intercepts */
6419 env->hflags |= HF_SVMI_MASK;
6420
6421 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6422
6423 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6424 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6425
6426 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6427 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6428
6429 /* clear exit_info_2 so we behave like the real hardware */
6430 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6431
6432 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6433 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6434 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6435 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6436 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6437 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6438 if (int_ctl & V_INTR_MASKING_MASK) {
6439 env->v_tpr = int_ctl & V_TPR_MASK;
6440 env->hflags2 |= HF2_VINTR_MASK;
6441 if (env->eflags & IF_MASK)
6442 env->hflags2 |= HF2_HIF_MASK;
6443 }
6444
6445 cpu_load_efer(env,
6446 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6447 env->eflags = 0;
6448 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6449 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6450 CC_OP = CC_OP_EFLAGS;
6451
6452 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6453 env, R_ES);
6454 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6455 env, R_CS);
6456 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6457 env, R_SS);
6458 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6459 env, R_DS);
6460
6461 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6462 env->eip = EIP;
6463 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6464 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6465 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6466 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6467 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6468
6469 /* FIXME: guest state consistency checks */
6470
6471 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6472 case TLB_CONTROL_DO_NOTHING:
6473 break;
6474 case TLB_CONTROL_FLUSH_ALL_ASID:
6475 /* FIXME: this is not 100% correct but should work for now */
6476 tlb_flush(env, 1);
6477 break;
6478 }
6479
6480 env->hflags2 |= HF2_GIF_MASK;
6481
6482 if (int_ctl & V_IRQ_MASK) {
6483 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6484 }
6485
6486 /* maybe we need to inject an event */
6487 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6488 if (event_inj & SVM_EVTINJ_VALID) {
6489 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6490 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6491 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6492
6493 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6494 /* FIXME: need to implement valid_err */
6495 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6496 case SVM_EVTINJ_TYPE_INTR:
6497 env->exception_index = vector;
6498 env->error_code = event_inj_err;
6499 env->exception_is_int = 0;
6500 env->exception_next_eip = -1;
6501 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6502 /* XXX: is it always correct? */
6503 do_interrupt(vector, 0, 0, 0, 1);
6504 break;
6505 case SVM_EVTINJ_TYPE_NMI:
6506 env->exception_index = EXCP02_NMI;
6507 env->error_code = event_inj_err;
6508 env->exception_is_int = 0;
6509 env->exception_next_eip = EIP;
6510 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6511 cpu_loop_exit();
6512 break;
6513 case SVM_EVTINJ_TYPE_EXEPT:
6514 env->exception_index = vector;
6515 env->error_code = event_inj_err;
6516 env->exception_is_int = 0;
6517 env->exception_next_eip = -1;
6518 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6519 cpu_loop_exit();
6520 break;
6521 case SVM_EVTINJ_TYPE_SOFT:
6522 env->exception_index = vector;
6523 env->error_code = event_inj_err;
6524 env->exception_is_int = 1;
6525 env->exception_next_eip = EIP;
6526 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6527 cpu_loop_exit();
6528 break;
6529 }
6530 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6531 }
6532}
6533
6534void helper_vmmcall(void)
6535{
6536 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6537 raise_exception(EXCP06_ILLOP);
6538}
6539
6540void helper_vmload(int aflag)
6541{
6542 target_ulong addr;
6543 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6544
6545 if (aflag == 2)
6546 addr = EAX;
6547 else
6548 addr = (uint32_t)EAX;
6549
6550 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6551 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6552 env->segs[R_FS].base);
6553
6554 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6555 env, R_FS);
6556 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6557 env, R_GS);
6558 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6559 &env->tr);
6560 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6561 &env->ldt);
6562
6563#ifdef TARGET_X86_64
6564 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6565 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6566 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6567 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6568#endif
6569 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6570 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6571 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6572 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6573}
6574
6575void helper_vmsave(int aflag)
6576{
6577 target_ulong addr;
6578 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6579
6580 if (aflag == 2)
6581 addr = EAX;
6582 else
6583 addr = (uint32_t)EAX;
6584
6585 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6586 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6587 env->segs[R_FS].base);
6588
6589 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6590 &env->segs[R_FS]);
6591 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6592 &env->segs[R_GS]);
6593 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6594 &env->tr);
6595 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6596 &env->ldt);
6597
6598#ifdef TARGET_X86_64
6599 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6600 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6601 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6602 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6603#endif
6604 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6605 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6606 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6607 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6608}
6609
6610void helper_stgi(void)
6611{
6612 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6613 env->hflags2 |= HF2_GIF_MASK;
6614}
6615
6616void helper_clgi(void)
6617{
6618 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6619 env->hflags2 &= ~HF2_GIF_MASK;
6620}
6621
6622void helper_skinit(void)
6623{
6624 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6625 /* XXX: not implemented */
6626 raise_exception(EXCP06_ILLOP);
6627}
6628
6629void helper_invlpga(int aflag)
6630{
6631 target_ulong addr;
6632 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6633
6634 if (aflag == 2)
6635 addr = EAX;
6636 else
6637 addr = (uint32_t)EAX;
6638
6639 /* XXX: could use the ASID to decide whether the flush is
6640 actually needed */
6641 tlb_flush_page(env, addr);
6642}
6643
6644void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6645{
6646 if (likely(!(env->hflags & HF_SVMI_MASK)))
6647 return;
6648#ifndef VBOX
6649 switch(type) {
6650 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6651 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6652 helper_vmexit(type, param);
6653 }
6654 break;
6655 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6656 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6657 helper_vmexit(type, param);
6658 }
6659 break;
6660 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6661 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6662 helper_vmexit(type, param);
6663 }
6664 break;
6665 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6666 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6667 helper_vmexit(type, param);
6668 }
6669 break;
6670 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6671 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6672 helper_vmexit(type, param);
6673 }
6674 break;
6675 case SVM_EXIT_MSR:
6676 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6677 /* FIXME: this should be read in at vmrun (faster this way?) */
6678 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6679 uint32_t t0, t1;
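 /* The MSR permission map uses two bits per MSR (read then write) in three
    2 KB blocks covering 0000_0000h..0000_1FFFh, C000_0000h..C000_1FFFh and
    C001_0000h..C001_1FFFh. t1 is used as the byte offset into the map, t0 as
    the bit offset within that byte, and param selects the read (0) or
    write (1) bit. */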
6680 switch((uint32_t)ECX) {
6681 case 0 ... 0x1fff:
6682 t0 = (ECX * 2) % 8;
6683 t1 = (ECX * 2) / 8; /* two permission bits per MSR */
6684 break;
6685 case 0xc0000000 ... 0xc0001fff:
6686 t0 = (8192 + ECX - 0xc0000000) * 2;
6687 t1 = (t0 / 8);
6688 t0 %= 8;
6689 break;
6690 case 0xc0010000 ... 0xc0011fff:
6691 t0 = (16384 + ECX - 0xc0010000) * 2;
6692 t1 = (t0 / 8);
6693 t0 %= 8;
6694 break;
6695 default:
6696 helper_vmexit(type, param);
6697 t0 = 0;
6698 t1 = 0;
6699 break;
6700 }
6701 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6702 helper_vmexit(type, param);
6703 }
6704 break;
6705 default:
6706 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6707 helper_vmexit(type, param);
6708 }
6709 break;
6710 }
6711#else /* VBOX */
6712 AssertMsgFailed(("We shouldn't be here; SVM is handled by HM!"));
6713#endif /* VBOX */
6714}
6715
6716void helper_svm_check_io(uint32_t port, uint32_t param,
6717 uint32_t next_eip_addend)
6718{
6719 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6720 /* FIXME: this should be read in at vmrun (faster this way?) */
6721 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
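 /* One bit per I/O port; param bits 4..6 give the access size in bytes, so
    the mask covers every port touched by the access. Any set bit triggers a
    #VMEXIT with the port number in the upper half of exit_info_1. */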
6722 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6723 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6724 /* next EIP */
6725 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6726 env->eip + next_eip_addend);
6727 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6728 }
6729 }
6730}
6731
6732/* Note: currently only 32 bits of exit_code are used */
6733void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6734{
6735 uint32_t int_ctl;
6736
6737 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6738 exit_code, exit_info_1,
6739 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6740 EIP);
6741
6742 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6743 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6744 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6745 } else {
6746 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6747 }
6748
6749 /* Save the VM state in the vmcb */
6750 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6751 &env->segs[R_ES]);
6752 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6753 &env->segs[R_CS]);
6754 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6755 &env->segs[R_SS]);
6756 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6757 &env->segs[R_DS]);
6758
6759 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6760 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6761
6762 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6763 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6764
6765 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6766 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6767 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6768 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6769 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6770
6771 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6772 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6773 int_ctl |= env->v_tpr & V_TPR_MASK;
6774 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6775 int_ctl |= V_IRQ_MASK;
6776 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6777
6778 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6779 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6780 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6781 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6782 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6783 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6784 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6785
6786 /* Reload the host state from vm_hsave */
6787 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6788 env->hflags &= ~HF_SVMI_MASK;
6789 env->intercept = 0;
6790 env->intercept_exceptions = 0;
6791 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6792 env->tsc_offset = 0;
6793
6794 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6795 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6796
6797 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6798 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6799
6800 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6801 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6802 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6803 /* we need to set the efer after the crs so the hidden flags get
6804 set properly */
6805 cpu_load_efer(env,
6806 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6807 env->eflags = 0;
6808 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6809 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6810 CC_OP = CC_OP_EFLAGS;
6811
6812 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6813 env, R_ES);
6814 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6815 env, R_CS);
6816 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6817 env, R_SS);
6818 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6819 env, R_DS);
6820
6821 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6822 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6823 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6824
6825 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6826 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6827
6828 /* other setups */
6829 cpu_x86_set_cpl(env, 0);
6830 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6831 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6832
6833 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6834 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6835 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6836 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6837 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6838
6839 env->hflags2 &= ~HF2_GIF_MASK;
6840 /* FIXME: Resets the current ASID register to zero (host ASID). */
6841
6842 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6843
6844 /* Clears the TSC_OFFSET inside the processor. */
6845
6846 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6847 from the page table indicated by the host's CR3. If the PDPEs contain
6848 illegal state, the processor causes a shutdown. */
6849
6850 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6851 env->cr[0] |= CR0_PE_MASK;
6852 env->eflags &= ~VM_MASK;
6853
6854 /* Disables all breakpoints in the host DR7 register. */
6855
6856 /* Checks the reloaded host state for consistency. */
6857
6858 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6859 host's code segment or non-canonical (in the case of long mode), a
6860 #GP fault is delivered inside the host. */
6861
6862 /* remove any pending exception */
6863 env->exception_index = -1;
6864 env->error_code = 0;
6865 env->old_exception = -1;
6866
6867 cpu_loop_exit();
6868}
6869
6870#endif
6871
6872/* MMX/SSE */
6873/* XXX: optimize by storing fpstt and fptags in the static cpu state */
6874void helper_enter_mmx(void)
6875{
6876 env->fpstt = 0;
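 /* Mark all eight registers as valid by clearing fptags[0..7] (an array of
    uint8_t) with two 32-bit stores. */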
6877 *(uint32_t *)(env->fptags) = 0;
6878 *(uint32_t *)(env->fptags + 4) = 0;
6879}
6880
6881void helper_emms(void)
6882{
6883 /* set to empty state */
6884 *(uint32_t *)(env->fptags) = 0x01010101;
6885 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6886}
6887
6888/* XXX: suppress */
6889void helper_movq(void *d, void *s)
6890{
6891 *(uint64_t *)d = *(uint64_t *)s;
6892}
6893
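/* ops_sse.h is instantiated twice: SHIFT 0 builds the MMX (64-bit) variants,
   SHIFT 1 the SSE (128-bit XMM) variants. helper_template.h below is
   instantiated once per integer operand size (byte, word, long and, on
   x86-64, quad). */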
6894#define SHIFT 0
6895#include "ops_sse.h"
6896
6897#define SHIFT 1
6898#include "ops_sse.h"
6899
6900#define SHIFT 0
6901#include "helper_template.h"
6902#undef SHIFT
6903
6904#define SHIFT 1
6905#include "helper_template.h"
6906#undef SHIFT
6907
6908#define SHIFT 2
6909#include "helper_template.h"
6910#undef SHIFT
6911
6912#ifdef TARGET_X86_64
6913
6914#define SHIFT 3
6915#include "helper_template.h"
6916#undef SHIFT
6917
6918#endif
6919
6920/* bit operations */
6921target_ulong helper_bsf(target_ulong t0)
6922{
6923 int count;
6924 target_ulong res;
6925
6926 res = t0;
6927 count = 0;
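 /* The translated code is expected to call this only with a non-zero operand
    (BSF with a zero source is handled in the translator); a zero value would
    never leave this loop. */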
6928 while ((res & 1) == 0) {
6929 count++;
6930 res >>= 1;
6931 }
6932 return count;
6933}
6934
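/* Shared implementation: with wordsize > 0 this implements LZCNT (a zero
   input returns the operand width); with wordsize == 0 it returns the index
   of the highest set bit, which is what helper_bsr below needs. */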
6935target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6936{
6937 int count;
6938 target_ulong res, mask;
6939
6940 if (wordsize > 0 && t0 == 0) {
6941 return wordsize;
6942 }
6943 res = t0;
6944 count = TARGET_LONG_BITS - 1;
6945 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6946 while ((res & mask) == 0) {
6947 count--;
6948 res <<= 1;
6949 }
6950 if (wordsize > 0) {
6951 return wordsize - 1 - count;
6952 }
6953 return count;
6954}
6955
6956target_ulong helper_bsr(target_ulong t0)
6957{
6958 return helper_lzcnt(t0, 0);
6959}
6960
6961static int compute_all_eflags(void)
6962{
6963 return CC_SRC;
6964}
6965
6966static int compute_c_eflags(void)
6967{
6968 return CC_SRC & CC_C;
6969}
6970
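/* Lazy condition codes: CC_OP remembers which operation last set the flags,
   and these helpers rebuild either the full flag set or just CF from
   CC_SRC/CC_DST on demand. */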
6971uint32_t helper_cc_compute_all(int op)
6972{
6973 switch (op) {
6974 default: /* should never happen */ return 0;
6975
6976 case CC_OP_EFLAGS: return compute_all_eflags();
6977
6978 case CC_OP_MULB: return compute_all_mulb();
6979 case CC_OP_MULW: return compute_all_mulw();
6980 case CC_OP_MULL: return compute_all_mull();
6981
6982 case CC_OP_ADDB: return compute_all_addb();
6983 case CC_OP_ADDW: return compute_all_addw();
6984 case CC_OP_ADDL: return compute_all_addl();
6985
6986 case CC_OP_ADCB: return compute_all_adcb();
6987 case CC_OP_ADCW: return compute_all_adcw();
6988 case CC_OP_ADCL: return compute_all_adcl();
6989
6990 case CC_OP_SUBB: return compute_all_subb();
6991 case CC_OP_SUBW: return compute_all_subw();
6992 case CC_OP_SUBL: return compute_all_subl();
6993
6994 case CC_OP_SBBB: return compute_all_sbbb();
6995 case CC_OP_SBBW: return compute_all_sbbw();
6996 case CC_OP_SBBL: return compute_all_sbbl();
6997
6998 case CC_OP_LOGICB: return compute_all_logicb();
6999 case CC_OP_LOGICW: return compute_all_logicw();
7000 case CC_OP_LOGICL: return compute_all_logicl();
7001
7002 case CC_OP_INCB: return compute_all_incb();
7003 case CC_OP_INCW: return compute_all_incw();
7004 case CC_OP_INCL: return compute_all_incl();
7005
7006 case CC_OP_DECB: return compute_all_decb();
7007 case CC_OP_DECW: return compute_all_decw();
7008 case CC_OP_DECL: return compute_all_decl();
7009
7010 case CC_OP_SHLB: return compute_all_shlb();
7011 case CC_OP_SHLW: return compute_all_shlw();
7012 case CC_OP_SHLL: return compute_all_shll();
7013
7014 case CC_OP_SARB: return compute_all_sarb();
7015 case CC_OP_SARW: return compute_all_sarw();
7016 case CC_OP_SARL: return compute_all_sarl();
7017
7018#ifdef TARGET_X86_64
7019 case CC_OP_MULQ: return compute_all_mulq();
7020
7021 case CC_OP_ADDQ: return compute_all_addq();
7022
7023 case CC_OP_ADCQ: return compute_all_adcq();
7024
7025 case CC_OP_SUBQ: return compute_all_subq();
7026
7027 case CC_OP_SBBQ: return compute_all_sbbq();
7028
7029 case CC_OP_LOGICQ: return compute_all_logicq();
7030
7031 case CC_OP_INCQ: return compute_all_incq();
7032
7033 case CC_OP_DECQ: return compute_all_decq();
7034
7035 case CC_OP_SHLQ: return compute_all_shlq();
7036
7037 case CC_OP_SARQ: return compute_all_sarq();
7038#endif
7039 }
7040}
7041
7042uint32_t helper_cc_compute_c(int op)
7043{
7044 switch (op) {
7045 default: /* should never happen */ return 0;
7046
7047 case CC_OP_EFLAGS: return compute_c_eflags();
7048
7049 case CC_OP_MULB: return compute_c_mull();
7050 case CC_OP_MULW: return compute_c_mull();
7051 case CC_OP_MULL: return compute_c_mull();
7052
7053 case CC_OP_ADDB: return compute_c_addb();
7054 case CC_OP_ADDW: return compute_c_addw();
7055 case CC_OP_ADDL: return compute_c_addl();
7056
7057 case CC_OP_ADCB: return compute_c_adcb();
7058 case CC_OP_ADCW: return compute_c_adcw();
7059 case CC_OP_ADCL: return compute_c_adcl();
7060
7061 case CC_OP_SUBB: return compute_c_subb();
7062 case CC_OP_SUBW: return compute_c_subw();
7063 case CC_OP_SUBL: return compute_c_subl();
7064
7065 case CC_OP_SBBB: return compute_c_sbbb();
7066 case CC_OP_SBBW: return compute_c_sbbw();
7067 case CC_OP_SBBL: return compute_c_sbbl();
7068
7069 case CC_OP_LOGICB: return compute_c_logicb();
7070 case CC_OP_LOGICW: return compute_c_logicw();
7071 case CC_OP_LOGICL: return compute_c_logicl();
7072
7073 case CC_OP_INCB: return compute_c_incl();
7074 case CC_OP_INCW: return compute_c_incl();
7075 case CC_OP_INCL: return compute_c_incl();
7076
7077 case CC_OP_DECB: return compute_c_incl();
7078 case CC_OP_DECW: return compute_c_incl();
7079 case CC_OP_DECL: return compute_c_incl();
7080
7081 case CC_OP_SHLB: return compute_c_shlb();
7082 case CC_OP_SHLW: return compute_c_shlw();
7083 case CC_OP_SHLL: return compute_c_shll();
7084
7085 case CC_OP_SARB: return compute_c_sarl();
7086 case CC_OP_SARW: return compute_c_sarl();
7087 case CC_OP_SARL: return compute_c_sarl();
7088
7089#ifdef TARGET_X86_64
7090 case CC_OP_MULQ: return compute_c_mull();
7091
7092 case CC_OP_ADDQ: return compute_c_addq();
7093
7094 case CC_OP_ADCQ: return compute_c_adcq();
7095
7096 case CC_OP_SUBQ: return compute_c_subq();
7097
7098 case CC_OP_SBBQ: return compute_c_sbbq();
7099
7100 case CC_OP_LOGICQ: return compute_c_logicq();
7101
7102 case CC_OP_INCQ: return compute_c_incl();
7103
7104 case CC_OP_DECQ: return compute_c_incl();
7105
7106 case CC_OP_SHLQ: return compute_c_shlq();
7107
7108 case CC_OP_SARQ: return compute_c_sarl();
7109#endif
7110 }
7111}