VirtualBox

source: vbox/trunk/src/recompiler_new/softmmu_template.h@ 13762

Last change on this file since 13762 was 13731, checked in by vboxsync, 16 years ago

more cleanup, 32-bit mode still doesn't work.
Very hard to tell why.

  • Property svn:eol-style set to native
File size: 13.0 KB
/*
 * Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
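
/* This header is a size-parameterized template: the including translation
 * unit defines SHIFT (0..3, i.e. DATA_SIZE of 1..8 bytes) and MMUSUFFIX
 * before each #include, and the macros above pick the matching DATA_TYPE
 * and load/store name suffixes.  A minimal sketch of a typical
 * instantiation (assumed from the usual QEMU-derived layout, not shown in
 * this file):
 *
 *   #define MMUSUFFIX _mmu
 *   #define SHIFT 2                // 32-bit accesses
 *   #include "softmmu_template.h"  // expands __ldl_mmu / __stl_mmu
 */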

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
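
/* READ_ACCESS_TYPE is the access type passed to tlb_fill(): 0 for a data
 * read, 2 for an instruction fetch (1 is used below for writes).  ADDR_READ
 * selects which TLB entry field is compared, so the code-fetch instantiation
 * checks addr_code while normal loads check addr_read. */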

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
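
/* Read DATA_SIZE bytes through an io_mem handler.  physaddr is the iotlb
 * entry for the page: its low bits select the io_mem handler slot, and its
 * page-aligned part combined with the access address gives the address
 * passed to the device callback.  Accesses wider than 32 bits are split
 * into two 32-bit device reads, ordered according to target endianness. */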
#ifndef VBOX
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
#else
DECLINLINE(DATA_TYPE) glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                            target_ulong addr,
                                            void *retaddr)
#endif
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
    return res;
}

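/* Fast-path load helper.  The virtual address indexes the software TLB; if
 * the tag matches, the access completes either through the io_read helper
 * (I/O pages), the slow path (accesses spanning two pages), or a direct
 * host load at addr + addend (normal RAM).  On a miss, tlb_fill() refills
 * the entry and the lookup is retried. */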
/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is match for unaligned or IO access */
    /* XXX: could be done more in memory macro in a non portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

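/* A load that crosses a page boundary is decomposed into two aligned loads
 * of the same size (at addr rounded down to DATA_SIZE and at the following
 * DATA_SIZE boundary); the two halves are then shifted and merged according
 * to target endianness.  The recursive calls are aligned, so they take one
 * of the single-page branches below. */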
/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

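/* Store helpers are only generated for the data-access instantiation; the
 * code-fetch instantiation (SOFTMMU_CODE_ACCESS) needs loads only. */
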
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

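/* Write DATA_SIZE bytes through an io_mem handler; the physaddr decoding
 * mirrors io_read above, and 64-bit stores are issued as two 32-bit device
 * writes in target endian order. */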
#ifndef VBOX
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
#else
DECLINLINE(void) glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                        DATA_TYPE val,
                                        target_ulong addr,
                                        void *retaddr)
#endif
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
}

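/* Fast-path store helper; same structure as the load helper above: TLB tag
 * compare, then either an I/O write, the cross-page slow path, or a direct
 * host store at addr + addend, with tlb_fill() and retry on a miss. */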
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

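/* A store that crosses a page boundary is broken up into DATA_SIZE single
 * byte stores issued through slow_stb, highest address first, with each byte
 * picked out of val according to target endianness. */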
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ