VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@13762

Last change on this file since 13762 was 13117, checked in by vboxsync, 16 years ago

recompiler: some logging fixes

  • Property svn:eol-style set to native
File size: 69.6 KB
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_M68K)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

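/* Slow-path TB lookup. The hash is computed from the *physical* PC, so a
   translated block is found again even when the same guest page is mapped
   at several virtual addresses. If no block matches (pc, cs_base, flags),
   the code is translated on the spot and linked into the physical hash
   chain. */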
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

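/* Fast-path TB lookup: a direct-mapped cache indexed by a hash of the
   virtual PC; only on a miss (or a flags/cs_base mismatch) do we fall back
   to tb_find_slow(). The hash has roughly this shape (illustrative sketch
   only; the real tb_jmp_cache_hash_func lives in exec-all.h and may fold
   the bits differently):

       h = (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
 */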
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . MMU enabled . MMU no-fault . Supervisor
    flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
        | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
        (msr_se << MSR_SE) | (msr_le << MSR_LE);
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC;
#elif defined(TARGET_M68K)
    flags = env->fpcr & M68K_FPCR_PREC;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0; /* XXXXX */
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}


/* main execution loop */

#ifdef VBOX

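/* VBox-specific variant of the main loop. The overall shape matches the
   generic QEMU loop in the #else branch below: save the host registers,
   enter a setjmp() context so exceptions can longjmp() back out, service
   pending interrupt/exit requests, then look up and run translated blocks
   until something forces an exit. The VBox additions are the VMM lock
   juggling, the CPU_INTERRUPT_RC and CPU_INTERRUPT_SINGLE_INSTR requests,
   and the remR3CanExecuteRaw() check for leaving the recompiler. */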
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_PPC)
    if (env1->halted) {
        if (env1->msr[MSR_EE] &&
            (env1->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_SPARC)
    if (env1->halted) {
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->psret != 0)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_ARM)
    if (env1->halted) {
        /* An interrupt wakes the CPU even if the I and F CPSR bits are
           set. */
        if (env1->interrupt_request
            & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_MIPS)
    if (env1->halted) {
        if (env1->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#endif

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)

    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

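    /* Exception handling relies on setjmp()/longjmp(): cpu_loop_exit()
       longjmp()s back to the setjmp() point below, unwinding out of the
       generated code and restarting the loop with a clean current_tb. */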
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0)
        {
            env->current_tb = NULL;
            VMMR3Unlock(env->pVM);
            VMMR3Lock(env->pVM);

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                Assert(!env->user_mode_only);
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else {
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt %d %d %VGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                }
                env->exception_index = -1;
            }

            T0 = 0; /* force lookup of first TB */
            for(;;)
            {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0))
                {
                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32(&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%VGv!!\n", env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        T0 = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        T0 = 0;
                    }
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
                    {
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        T0 = 0;
                    }
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_EXIT)
                    {
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & CPU_INTERRUPT_RC)
                    {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                }

                /*
                 * Check whether the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);

                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                tb = tb_find_fast();

                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0
                    && !(tb->cflags & CF_RAW_MODE)
                    && tb->page_addr[1] == -1)
                {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);

#if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik)
#if !defined(DEBUG_bird)
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        Log(("EMR0: %VGv ESP=%VGv IF=%d TF=%d CPL=%d\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
                    }
                }
                else
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        if(env->eflags & VM_MASK)
                        {
                            Log(("EMV86: %04X:%VGv IF=%d TF=%d CPL=%d CR0=%RGr\n", env->segs[R_CS].selector, env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0]));
                        }
                        else
                        {
                            Log(("EMR3: %VGv ESP=%VGv IF=%d TF=%d CPL=%d IOPL=%d CR0=%RGr\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), env->cr[0]));
                        }
                    }
                }
                else
                {
                    /* Seriously slows down realmode booting. */
                    LogFlow(("EMRM: %04X:%VGv SS:ESP=%04X:%VGv IF=%d TF=%d CPL=%d PE=%d PG=%d\n", env->segs[R_CS].selector, env->eip, env->segs[R_SS].selector, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0] & X86_CR0_PE, env->cr[0] & X86_CR0_PG));
                }
#endif /* !DEBUG_bird */
                if(env->state & CPU_EMULATE_SINGLE_STEP)
                {
#ifdef DEBUG_bird
                    static int s_cTimes = 0;
                    if (s_cTimes++ > 1000000)
                    {
                        RTLogPrintf("Enough stepping!\n");
#if 0
                        env->exception_index = EXCP_DEBUG;
                        ret = env->exception_index;
                        cpu_loop_exit();
#else
                        env->state &= ~CPU_EMULATE_SINGLE_STEP;
#endif
                    }
#endif
                    TMCpuTickPause(env->pVM);
                    remR3DisasInstr(env, -1, NULL);
                    TMCpuTickResume(env->pVM);
                    if(emulate_single_instr(env) == -1)
                    {
                        Log(("emulate_single_instr failed for EIP=%VGv!!\n", env->eip));
                    }
                }
                else
                {
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                    gen_func();
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                }
#else /* !DEBUG || !VBOX || DEBUG_dmik */

                RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                gen_func();
                RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);

#endif /* !DEBUG || !VBOX || DEBUG_dmik */
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        } else {
            env_to_regs();
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do
           anything unnecessary (like crashing during emulate single instruction). */
        env->current_tb = NULL;
        TMTimerPoll(env1->pVM);
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#else
#error unsupported target CPU
#endif
#include "hostregs_helper.h"
    return ret;
}


#else /* !VBOX */


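/* Generic (upstream QEMU) main execution loop, compiled when VBOX is not
   defined. It follows the same save-registers / setjmp / interrupt-check /
   TB-lookup / dispatch pattern as the VBox variant above, plus the
   user-mode-only, kqemu and host-specific dispatch paths that the VBox
   build does not use. */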
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7;
    target_ulong tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_PPC)
    if (env1->halted) {
        if (env1->msr[MSR_EE] &&
            (env1->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_SPARC)
    if (env1->halted) {
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->psret != 0)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_ARM)
    if (env1->halted) {
        /* An interrupt wakes the CPU even if the I and F CPSR bits are
           set. */
        if (env1->interrupt_request
            & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_MIPS)
    if (env1->halted) {
        if (env1->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#endif

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
#ifdef VBOX
            VMMR3Unlock(env->pVM);
            VMMR3Lock(env->pVM);

            /* Check for high priority requests first (like fatal
               errors). */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }
#endif /* VBOX */


            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
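            /* Optional kqemu acceleration: kqemu_is_ok() decides whether
               the kernel-side accelerator can run the current CPU context
               natively; if not (or when it bails out), execution falls
               back to the translated-block loop below. */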
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#ifdef VBOX
                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32(&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%VGv!!\n", env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        env->exception_index = EXCP_SINGLE_INSTR;
                        cpu_loop_exit();
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
#endif /* VBOX */
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
#if defined(VBOX)
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD);
#else
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#endif
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
#if defined(VBOX)
                        if (intno >= 0)
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (msr_ee != 0) {
                        if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                            /* Raise it */
                            env->exception_index = EXCP_EXTERNAL;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                            /* Raise it */
                            env->exception_index = EXCP_DECR;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
                        !(env->hflags & MIPS_HFLAG_EXL) &&
                        !(env->hflags & MIPS_HFLAG_ERL) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
                    /* XXXXX */
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
#if defined(VBOX)
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
#else
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
#endif
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
#endif
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
#if defined(VBOX)
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
#else
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
#endif
                        cpu_loop_exit();
                    }
#if defined(VBOX)
                    if (interrupt_request & CPU_INTERRUPT_RC) {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        cpu_loop_exit();
                    }
#endif
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
#ifdef reg_EAX
                    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_EBX
                    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ECX
                    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
                    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_ESI
                    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
                    env->regs[R_EDI] = EDI;
#endif
#ifdef reg_EBP
                    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESP
                    env->regs[R_ESP] = ESP;
#endif
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
#ifdef VBOX
                /*
                 * Check whether the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);
#endif /* VBOX */
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
#ifdef VBOX
                        !(tb->cflags & CF_RAW_MODE) &&
#endif
                        tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                {
                    if (!(tb->cflags & CF_CODE_COPY)) {
                        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                            save_native_fp_state(env);
                        }
                        gen_func();
                    } else {
                        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                            restore_native_fp_state(env);
                        }
                        /* we work with native eflags */
                        CC_SRC = cc_table[CC_OP].compute_all();
                        CC_OP = CC_OP_EFLAGS;
                        asm(".globl exec_loop\n"
                            "\n"
                            "debug1:\n"
                            "    pushl %%ebp\n"
                            "    fs movl %10, %9\n"
                            "    fs movl %11, %%eax\n"
                            "    andl $0x400, %%eax\n"
                            "    fs orl %8, %%eax\n"
                            "    pushl %%eax\n"
                            "    popf\n"
                            "    fs movl %%esp, %12\n"
                            "    fs movl %0, %%eax\n"
                            "    fs movl %1, %%ecx\n"
                            "    fs movl %2, %%edx\n"
                            "    fs movl %3, %%ebx\n"
                            "    fs movl %4, %%esp\n"
                            "    fs movl %5, %%ebp\n"
                            "    fs movl %6, %%esi\n"
                            "    fs movl %7, %%edi\n"
                            "    fs jmp *%9\n"
                            "exec_loop:\n"
                            "    fs movl %%esp, %4\n"
                            "    fs movl %12, %%esp\n"
                            "    fs movl %%eax, %0\n"
                            "    fs movl %%ecx, %1\n"
                            "    fs movl %%edx, %2\n"
                            "    fs movl %%ebx, %3\n"
                            "    fs movl %%ebp, %5\n"
                            "    fs movl %%esi, %6\n"
                            "    fs movl %%edi, %7\n"
                            "    pushf\n"
                            "    popl %%eax\n"
                            "    movl %%eax, %%ecx\n"
                            "    andl $0x400, %%ecx\n"
                            "    shrl $9, %%ecx\n"
                            "    andl $0x8d5, %%eax\n"
                            "    fs movl %%eax, %8\n"
                            "    movl $1, %%eax\n"
                            "    subl %%ecx, %%eax\n"
                            "    fs movl %%eax, %11\n"
                            "    fs movl %9, %%ebx\n" /* get T0 value */
                            "    popl %%ebp\n"
                            :
                            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                              "a" (gen_func),
                              "m" (*(uint8_t *)offsetof(CPUState, df)),
                              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                            : "%ecx", "%edx"
                            );
                    }
                }
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
#if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik)
#if !defined(DEBUG_bird)
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        Log(("EMR0: %VGv IF=%d TF=%d CPL=%d\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
                    }
                }
                else
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        if(env->eflags & VM_MASK)
                        {
                            Log(("EMV86: %VGv IF=%d TF=%d CPL=%d flags=%08X CR0=%RGr\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, flags, env->cr[0]));
                        }
                        else
                        {
                            Log(("EMR3: %VGv IF=%d TF=%d CPL=%d IOPL=%d flags=%08X CR0=%RGr\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), flags, env->cr[0]));
                        }
                    }
                }
#endif /* !DEBUG_bird */
                if(env->state & CPU_EMULATE_SINGLE_STEP)
                {
#ifdef DEBUG_bird
                    static int s_cTimes = 0;
                    if (s_cTimes++ > 1000000) /* 1 million */
                    {
                        RTLogPrintf("Enough stepping!\n");
#if 0
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
#else
                        env->state &= ~CPU_EMULATE_SINGLE_STEP;
#endif
                    }
#endif
                    TMCpuTickPause(env->pVM);
                    remR3DisasInstr(env, -1, NULL);
                    TMCpuTickResume(env->pVM);
                    if(emulate_single_instr(env) == -1)
                    {
                        printf("emulate_single_instr failed for EIP=%VGv!!\n", env->eip);
                    }
                }
                else
                {
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                    gen_func();
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                }
#else /* !DEBUG || !VBOX || DEBUG_dmik */

#ifdef VBOX
                RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                gen_func();
                RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
#else /* !VBOX */
                gen_func();
#endif /* !VBOX */

#endif /* !DEBUG || !VBOX || DEBUG_dmik */
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            }
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

#endif /* !VBOX */

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

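/* Per-target SIGSEGV handlers for user mode (no soft MMU):
   handle_cpu_signal() first lets page_unprotect() deal with writes to
   pages that were write-protected to catch self-modifying code, then asks
   the target MMU code whether the access was a guest fault, and if so
   turns the host signal into a guest exception via longjmp() back into
   cpu_exec(). */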
#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%VGv CR2=0x%VGv error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

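/* Host-side entry points: cpu_signal_handler() extracts the faulting PC
   and, where the host signal context exposes it, the read/write direction,
   then forwards everything to handle_cpu_signal() above. Installation is
   host specific and happens elsewhere; a typical user-mode setup looks
   roughly like this (illustrative sketch only, host_segv_handler being a
   hypothetical wrapper around cpu_signal_handler()):

       struct sigaction act;
       sigfillset(&act.sa_mask);
       act.sa_flags = SA_SIGINFO;
       act.sa_sigaction = host_segv_handler;
       sigaction(SIGSEGV, &act, NULL);
 */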
#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)  /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */