Changeset 13731 in vbox
- Timestamp:
- Nov 1, 2008 7:39:12 PM (16 years ago)
- Location:
- trunk/src/recompiler_new
- Files:
-
- 2 deleted
- 9 edited
-
Sun/config-host.h (modified) (1 diff)
-
VBoxRecompiler.c (modified) (11 diffs)
-
dyngen-exec.h (modified) (1 diff)
-
dyngen.c (deleted)
-
exec-all.h (modified) (2 diffs)
-
exec.c (modified) (1 diff)
-
osdep.h (modified) (1 diff)
-
softmmu_header.h (modified) (7 diffs)
-
softmmu_template.h (modified) (3 diffs)
-
target-i386/cpu.h (modified) (1 diff)
-
target-i386/op.c (deleted)
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/recompiler_new/Sun/config-host.h
r8168 r13731 21 21 22 22 23 #if defined(RT_ARCH_AMD64) || defined(HOST_X86_64) /* The latter, for dyngen when cross compiling (windows, l4, etc). */23 #if defined(RT_ARCH_AMD64) 24 24 # define HOST_X86_64 1 25 25 # define HOST_LONG_BITS 64 -
trunk/src/recompiler_new/VBoxRecompiler.c
r13716 r13731 3080 3080 VBOX_CHECK_ADDR(SrcGCPhys); 3081 3081 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb); 3082 #ifdef DEBUG_PHYS 3083 LogRel(("read(%d): %p\n", cb, SrcGCPhys)); 3084 #endif 3082 3085 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3083 3086 } … … 3094 3097 STAM_PROFILE_ADV_START(&gStatMemRead, a); 3095 3098 VBOX_CHECK_ADDR(SrcGCPhys); 3096 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys); 3099 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys); 3097 3100 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3101 #ifdef DEBUG_PHYS 3102 LogRel(("readu8: %x <- %p\n", val, SrcGCPhys)); 3103 #endif 3098 3104 return val; 3099 3105 } … … 3112 3118 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys); 3113 3119 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3120 #ifdef DEBUG_PHYS 3121 LogRel(("reads8: %x <- %p\n", val, SrcGCPhys)); 3122 #endif 3114 3123 return val; 3115 3124 } … … 3128 3137 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys); 3129 3138 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3139 #ifdef DEBUG_PHYS 3140 LogRel(("readu16: %x <- %p\n", val, SrcGCPhys)); 3141 #endif 3130 3142 return val; 3131 3143 } … … 3144 3156 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys); 3145 3157 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3158 #ifdef DEBUG_PHYS 3159 LogRel(("reads16: %x <- %p\n", val, SrcGCPhys)); 3160 #endif 3146 3161 return val; 3147 3162 } … … 3160 3175 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys); 3161 3176 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3177 #ifdef DEBUG_PHYS 3178 LogRel(("readu32: %x <- %p\n", val, SrcGCPhys)); 3179 #endif 3162 3180 return val; 3163 3181 } … … 3176 3194 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys); 3177 3195 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3196 #ifdef DEBUG_PHYS 3197 LogRel(("reads32: %x <- %p\n", val, SrcGCPhys)); 3198 #endif 3178 3199 return val; 3179 3200 } … … 3224 3245 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb); 3225 3246 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a); 3247 
LogRel(("write(%d): %p\n", cb, DstGCPhys)); 3226 3248 }
trunk/src/recompiler_new/dyngen-exec.h
r13726 r13731 277 277 #endif 278 278 279 #ifdef __i386__280 #define EXIT_TB() asm volatile ("ret")281 #define GOTO_LABEL_PARAM(n) asm volatile ("jmp " ASM_NAME(__op_gen_label) #n)282 #endif283 #ifdef __x86_64__284 #define EXIT_TB() asm volatile ("ret")285 #define GOTO_LABEL_PARAM(n) asm volatile ("jmp " ASM_NAME(__op_gen_label) #n)286 #endif287 #ifdef __powerpc__288 #define EXIT_TB() asm volatile ("blr")289 #define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n)290 #endif291 #ifdef __s390__292 #define EXIT_TB() asm volatile ("br %r14")293 #define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n)294 #endif295 #ifdef __alpha__296 #define EXIT_TB() asm volatile ("ret")297 #endif298 #ifdef __ia64__299 #define EXIT_TB() asm volatile ("br.ret.sptk.many b0;;")300 #define GOTO_LABEL_PARAM(n) asm volatile ("br.sptk.many " \301 ASM_NAME(__op_gen_label) #n)302 #endif303 #ifdef __sparc__304 #define EXIT_TB() asm volatile ("jmpl %i0 + 8, %g0; nop")305 #define GOTO_LABEL_PARAM(n) asm volatile ("ba " ASM_NAME(__op_gen_label) #n ";nop")306 #endif307 #ifdef __arm__308 #define EXIT_TB() asm volatile ("b exec_loop")309 #define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n)310 #endif311 #ifdef __mc68000312 #define EXIT_TB() asm volatile ("rts")313 #endif314 315 316 279 #ifdef VBOX 317 280 #define GETPC() ASMReturnAddress() -
trunk/src/recompiler_new/exec-all.h
r13382 r13731 348 348 #if !defined(CONFIG_USER_ONLY) 349 349 350 void tlb_fill(target_ulong addr, int is_write, int is_user,350 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, 351 351 void *retaddr); 352 352 … … 391 391 is the offset relative to phys_ram_base */ 392 392 #ifndef VBOX 393 static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr) 394 #else 395 DECLINLINE(target_ulong) get_phys_addr_code(CPUState *env, target_ulong addr) 396 #endif 397 { 398 int is_user, index, pd; 399 400 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 401 #if defined(TARGET_I386) 402 is_user = ((env->hflags & HF_CPL_MASK) == 3); 403 #elif defined (TARGET_PPC) 404 is_user = msr_pr; 405 #elif defined (TARGET_MIPS) 406 is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM); 407 #elif defined (TARGET_SPARC) 408 is_user = (env->psrs == 0); 409 #elif defined (TARGET_ARM) 410 is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR); 411 #elif defined (TARGET_SH4) 412 is_user = ((env->sr & SR_MD) == 0); 413 #else 414 #error unimplemented CPU 415 #endif 416 #ifndef VBOX 417 if (__builtin_expect(env->tlb_table[is_user][index].addr_code != 418 (addr & TARGET_PAGE_MASK), 0)) { 419 #else 420 if (RT_UNLIKELY(env->tlb_table[is_user][index].addr_code != 421 (addr & TARGET_PAGE_MASK))) { 422 #endif 393 static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr) 394 #else 395 DECLINLINE(target_ulong) get_phys_addr_code(CPUState *env1, target_ulong addr) 396 #endif 397 { 398 int mmu_idx, page_index, pd; 399 400 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 401 mmu_idx = cpu_mmu_index(env1); 402 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code != 403 (addr & TARGET_PAGE_MASK))) { 423 404 ldub_code(addr); 424 405 } 425 pd = env ->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;406 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK; 426 407 if (pd > IO_MEM_ROM && !(pd & 
IO_MEM_ROMD)) { 427 408 # ifdef VBOX 428 409 /* deal with non-MMIO access handlers. */ 429 return remR3PhysGetPhysicalAddressCode(env, addr, &env->tlb_table[is_user][index]); 410 return remR3PhysGetPhysicalAddressCode(env1, addr, &env1->tlb_table[mmu_idx][page_index]); 411 # elif defined(TARGET_SPARC) || defined(TARGET_MIPS) 412 do_unassigned_access(addr, 0, 1, 0, 4); 413 #else 414 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); 415 #endif 416 } 417 418 # if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 419 return addr + env1->tlb_table[mmu_idx][page_index].addend; 420 # elif defined(VBOX) 421 return remR3HCVirt2GCPhys(env, (void *)(addr + env1->tlb_table[mmu_idx][page_index].addend)); 430 422 # else 431 cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr); 432 # endif 433 } 434 # if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 435 return addr + env->tlb_table[is_user][index].addend; 436 # elif defined(VBOX) 437 return remR3HCVirt2GCPhys(env, (void *)(addr + env->tlb_table[is_user][index].addend)); 438 # else 439 return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base; 423 return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base; 440 424 # endif 441 425 } -
trunk/src/recompiler_new/exec.c
r13652 r13731 2299 2299 int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 2300 2300 target_phys_addr_t paddr, int prot, 2301 int is_user, int is_softmmu)2301 int mmu_idx, int is_softmmu) 2302 2302 { 2303 2303 return 0; -
trunk/src/recompiler_new/osdep.h
r13384 r13731 30 30 # define NULL 0 31 31 #endif 32 33 #define fflush(file) RTLogFlush(NULL) 34 #define printf(...) LogIt(LOG_INSTANCE, 0, LOG_GROUP_REM_PRINTF, (__VA_ARGS__)) 35 #define fprintf(logfile, ...) LogIt(LOG_INSTANCE, 0, LOG_GROUP_REM_PRINTF, (__VA_ARGS__)) 36 32 37 33 38 #else /* !VBOX */ -
trunk/src/recompiler_new/softmmu_header.h
r13382 r13731 81 81 82 82 #if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \ 83 (ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU) 84 85 #ifdef VBOX 86 /* generic store macro */ 87 88 DELCINLINE(void) glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v) 89 { 90 int index; 91 target_ulong addr; 92 unsigned long physaddr; 93 int is_user; 94 95 addr = ptr; 96 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 97 is_user = CPU_MMU_INDEX; 98 if (__builtin_expect(env->tlb_table[is_user][index].addr_write != 99 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 100 glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user); 101 } else { 102 physaddr = addr + env->tlb_table[is_user][index].addend; 103 glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v); 104 } 105 } 106 107 #else /* !VBOX */ 83 (ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU) && !defined(VBOX) 84 85 static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr) 86 { 87 int res; 88 89 asm volatile ("movl %1, %%edx\n" 90 "movl %1, %%eax\n" 91 "shrl %3, %%edx\n" 92 "andl %4, %%eax\n" 93 "andl %2, %%edx\n" 94 "leal %5(%%edx, %%ebp), %%edx\n" 95 "cmpl (%%edx), %%eax\n" 96 "movl %1, %%eax\n" 97 "je 1f\n" 98 "movl %6, %%edx\n" 99 "call %7\n" 100 "movl %%eax, %0\n" 101 "jmp 2f\n" 102 "1:\n" 103 "addl 12(%%edx), %%eax\n" 104 #if DATA_SIZE == 1 105 "movzbl (%%eax), %0\n" 106 #elif DATA_SIZE == 2 107 "movzwl (%%eax), %0\n" 108 #elif DATA_SIZE == 4 109 "movl (%%eax), %0\n" 110 #else 111 #error unsupported size 112 #endif 113 "2:\n" 114 : "=r" (res) 115 : "r" (ptr), 116 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), 117 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), 118 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)), 119 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)), 120 "i" (CPU_MMU_INDEX), 121 "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX)) 122 : "%eax", "%ecx", "%edx", "memory", "cc"); 123 return res; 124 } 125 126 #if DATA_SIZE 
<= 2 127 static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr) 128 { 129 int res; 130 131 asm volatile ("movl %1, %%edx\n" 132 "movl %1, %%eax\n" 133 "shrl %3, %%edx\n" 134 "andl %4, %%eax\n" 135 "andl %2, %%edx\n" 136 "leal %5(%%edx, %%ebp), %%edx\n" 137 "cmpl (%%edx), %%eax\n" 138 "movl %1, %%eax\n" 139 "je 1f\n" 140 "movl %6, %%edx\n" 141 "call %7\n" 142 #if DATA_SIZE == 1 143 "movsbl %%al, %0\n" 144 #elif DATA_SIZE == 2 145 "movswl %%ax, %0\n" 146 #else 147 #error unsupported size 148 #endif 149 "jmp 2f\n" 150 "1:\n" 151 "addl 12(%%edx), %%eax\n" 152 #if DATA_SIZE == 1 153 "movsbl (%%eax), %0\n" 154 #elif DATA_SIZE == 2 155 "movswl (%%eax), %0\n" 156 #else 157 #error unsupported size 158 #endif 159 "2:\n" 160 : "=r" (res) 161 : "r" (ptr), 162 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), 163 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), 164 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)), 165 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)), 166 "i" (CPU_MMU_INDEX), 167 "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX)) 168 : "%eax", "%ecx", "%edx", "memory", "cc"); 169 return res; 170 } 171 #endif 108 172 109 173 static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v) … … 127 191 #error unsupported size 128 192 #endif 129 " pushl %6\n"193 "movl %6, %%ecx\n" 130 194 "call %7\n" 131 "popl %%eax\n"132 195 "jmp 2f\n" 133 196 "1:\n" … … 143 206 #endif 144 207 "2:\n" 145 : 146 : "r" (ptr), 147 /* NOTE: 'q' would be needed as constraint, but we could not use it 148 with T1 ! 
*/ 149 "r" (v), 150 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), 151 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), 208 : 209 : "r" (ptr), 210 #if DATA_SIZE == 1 211 "q" (v), 212 #else 213 "r" (v), 214 #endif 215 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), 216 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), 152 217 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)), 153 218 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_write)), … … 156 221 : "%eax", "%ecx", "%edx", "memory", "cc"); 157 222 } 158 #endif /* !VBOX */159 160 223 #else 161 224 … … 168 231 #endif 169 232 { 170 int index; 233 234 int page_index; 171 235 RES_TYPE res; 172 236 target_ulong addr; 173 237 unsigned long physaddr; 174 int is_user;238 int mmu_idx; 175 239 176 240 addr = ptr; 177 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 178 is_user = CPU_MMU_INDEX; 179 #ifndef VBOX 180 if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ != 181 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 182 #else 183 if (RT_UNLIKELY(env->tlb_table[is_user][index].ADDR_READ != 184 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { 185 #endif 186 res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user); 241 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 242 mmu_idx = CPU_MMU_INDEX; 243 if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != 244 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { 245 res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx); 187 246 } else { 188 physaddr = addr + env->tlb_table[ is_user][index].addend;247 physaddr = addr + env->tlb_table[mmu_idx][page_index].addend; 189 248 res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr); 190 249 } … … 199 258 #endif 200 259 { 201 int res, index;260 int res, page_index; 202 261 target_ulong addr; 203 262 unsigned long physaddr; 204 int is_user;263 int mmu_idx; 205 264 206 265 addr = ptr; 207 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 208 is_user = CPU_MMU_INDEX; 209 #ifndef 
VBOX 210 if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ != 211 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 212 #else 213 if (RT_UNLIKELY(env->tlb_table[is_user][index].ADDR_READ != 214 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { 215 #endif 216 res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user); 266 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 267 mmu_idx = CPU_MMU_INDEX; 268 if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != 269 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { 270 res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx); 217 271 } else { 218 physaddr = addr + env->tlb_table[ is_user][index].addend;272 physaddr = addr + env->tlb_table[mmu_idx][page_index].addend; 219 273 res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr); 220 274 } … … 232 286 #endif 233 287 { 234 int index;288 int page_index; 235 289 target_ulong addr; 236 290 unsigned long physaddr; 237 int is_user;291 int mmu_idx; 238 292 239 293 addr = ptr; 240 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 241 is_user = CPU_MMU_INDEX; 242 #ifndef VBOX 243 if (__builtin_expect(env->tlb_table[is_user][index].addr_write != 244 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 245 #else 246 if (RT_UNLIKELY(env->tlb_table[is_user][index].addr_write != 247 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { 248 #endif 249 glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user); 294 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 295 mmu_idx = CPU_MMU_INDEX; 296 if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write != 297 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { 298 glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, mmu_idx); 250 299 } else { 251 physaddr = addr + env->tlb_table[ is_user][index].addend;300 physaddr = addr + env->tlb_table[mmu_idx][page_index].addend; 252 301 glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v); 253 302 } -
trunk/src/recompiler_new/softmmu_template.h
r13382 r13731 57 57 #endif 58 58 59 static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr, 60 int is_user,59 static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr, 60 int mmu_idx, 61 61 void *retaddr); 62 62 #ifndef VBOX … … 207 207 static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr, 208 208 DATA_TYPE val, 209 int is_user,209 int mmu_idx, 210 210 void *retaddr); 211 211 … … 299 299 } 300 300 301 /* handles all unaligned cases */302 301 /* handles all unaligned cases */ 303 302 static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr, -
trunk/src/recompiler_new/target-i386/cpu.h
r13382 r13731 635 635 uint32_t smbase; 636 636 int old_exception; /* exception in flight */ 637 638 637 639 638 CPU_COMMON
Note:
See TracChangeset
for help on using the changeset viewer.

