VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm

; $Id: VMMR0JmpA-amd64.asm 106061 2024-09-16 14:03:52Z vboxsync $
;; @file
; VMM - R0 SetJmp / LongJmp routines for AMD64.
;

;
; Copyright (C) 2006-2024 Oracle and/or its affiliates.
;
; This file is part of VirtualBox base platform packages, as
; available from https://www.virtualbox.org.
;
; This program is free software; you can redistribute it and/or
; modify it under the terms of the GNU General Public License
; as published by the Free Software Foundation, in version 3 of the
; License.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, see <https://www.gnu.org/licenses>.
;
; SPDX-License-Identifier: GPL-3.0-only
;

;*********************************************************************************************************************************
;*  Header Files                                                                                                                  *
;*********************************************************************************************************************************
%define RT_ASM_WITH_SEH64_ALT
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "VBox/err.mac"
%include "VBox/param.mac"


BEGINCODE

;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
; in the middle of a ring-3 call. Another difference is the function pointer and
; arguments. This has to do with resuming code and the stack frame of the caller.
;
; @returns  VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[esp+0x04]  Our jmp_buf.
; @param    pfn      msc:rdx gcc:rsi x86:[esp+0x08]  The function to be called when not resuming.
; @param    pvUser1  msc:r8  gcc:rdx x86:[esp+0x0c]  The first argument of that function.
; @param    pvUser2  msc:r9  gcc:rcx x86:[esp+0x10]  The second argument of that function.
;
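;
; In rough C terms the contract is (an illustrative sketch only; the helper
; names below are made up for exposition, not actual VMM functions):
;
;       int vmmR0CallRing3SetJmp(VMMR0JMPBUF *pJmpBuf, int (*pfn)(void *, void *),
;                                void *pvUser1, void *pvUser2)
;       {
;           save_nonvolatile_context(pJmpBuf);  /* arms pJmpBuf->rip */
;           return pfn(pvUser1, pvUser2);       /* or the rc handed to vmmR0CallRing3LongJmp */
;       }
;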
GLOBALNAME vmmR0CallRing3SetJmp2
GLOBALNAME vmmR0CallRing3SetJmpEx
BEGINPROC vmmR0CallRing3SetJmp
        ;
        ; Save the registers.
        ;
        push    rbp
        SEH64_PUSH_xBP
        mov     rbp, rsp
        SEH64_SET_FRAME_xBP 0
 %ifdef ASM_CALL64_MSC
        sub     rsp, 30h                        ; (10h is used by resume (??), 20h for callee spill area)
        SEH64_ALLOCATE_STACK 30h
SEH64_END_PROLOGUE
        mov     r11, rdx                        ; pfn
        mov     rdx, rcx                        ; pJmpBuf
 %else
        sub     rsp, 10h                        ; (10h is used by resume (??))
        SEH64_ALLOCATE_STACK 10h
SEH64_END_PROLOGUE
        mov     r8, rdx                         ; pvUser1 (save it like MSC)
        mov     r9, rcx                         ; pvUser2 (save it like MSC)
        mov     r11, rsi                        ; pfn
        mov     rdx, rdi                        ; pJmpBuf
 %endif
        mov     [xDX + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
        mov     [xDX + VMMR0JMPBUF.rsi], rsi
        mov     [xDX + VMMR0JMPBUF.rdi], rdi
 %endif
        mov     [xDX + VMMR0JMPBUF.rbp], rbp
        mov     [xDX + VMMR0JMPBUF.r12], r12
        mov     [xDX + VMMR0JMPBUF.r13], r13
        mov     [xDX + VMMR0JMPBUF.r14], r14
        mov     [xDX + VMMR0JMPBUF.r15], r15
        mov     xAX, [rbp + 8]                  ; (not really necessary, except for validity check)
        mov     [xDX + VMMR0JMPBUF.rip], xAX
 %ifdef ASM_CALL64_MSC
        lea     r10, [rsp + 20h]                ; Must skip the callee spill area.
 %else
        mov     r10, rsp
 %endif
        mov     [xDX + VMMR0JMPBUF.rsp], r10
 %ifdef RT_OS_WINDOWS
        movdqa  [xDX + VMMR0JMPBUF.xmm6], xmm6
        movdqa  [xDX + VMMR0JMPBUF.xmm7], xmm7
        movdqa  [xDX + VMMR0JMPBUF.xmm8], xmm8
        movdqa  [xDX + VMMR0JMPBUF.xmm9], xmm9
        movdqa  [xDX + VMMR0JMPBUF.xmm10], xmm10
        movdqa  [xDX + VMMR0JMPBUF.xmm11], xmm11
        movdqa  [xDX + VMMR0JMPBUF.xmm12], xmm12
        movdqa  [xDX + VMMR0JMPBUF.xmm13], xmm13
        movdqa  [xDX + VMMR0JMPBUF.xmm14], xmm14
        movdqa  [xDX + VMMR0JMPBUF.xmm15], xmm15
 %endif
        pushf
        pop     xAX
        mov     [xDX + VMMR0JMPBUF.rflags], xAX
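        ; At this point the buffer holds the complete nonvolatile context:
        ; callee-saved GPRs, RBP, the caller-visible RSP, the return RIP
        ; (doubling as the armed/valid marker), RFLAGS and, on Windows,
        ; the nonvolatile XMM registers.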

        ;
        ; Save the call then make it.
        ;
        mov     [xDX + VMMR0JMPBUF.pfn], r11
        mov     [xDX + VMMR0JMPBUF.pvUser1], r8
        mov     [xDX + VMMR0JMPBUF.pvUser2], r9

        mov     r12, rdx                        ; Save pJmpBuf.
 %ifdef ASM_CALL64_MSC
        mov     rcx, r8                         ; pvUser -> arg0
        mov     rdx, r9
 %else
        mov     rdi, r8                         ; pvUser -> arg0
        mov     rsi, r9
 %endif
        call    r11
        mov     rdx, r12                        ; Restore pJmpBuf

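        ; A normal return from pfn falls straight through into .proper_return,
        ; which restores the saved context and zeroes .rip so the buffer cannot
        ; be the target of a stale long jump afterwards.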
        ;
        ; Return like in the long jump but clear rip, no shortcuts here.
        ;
.proper_return:
%ifdef RT_OS_WINDOWS
        movdqa  xmm6,  [xDX + VMMR0JMPBUF.xmm6 ]
        movdqa  xmm7,  [xDX + VMMR0JMPBUF.xmm7 ]
        movdqa  xmm8,  [xDX + VMMR0JMPBUF.xmm8 ]
        movdqa  xmm9,  [xDX + VMMR0JMPBUF.xmm9 ]
        movdqa  xmm10, [xDX + VMMR0JMPBUF.xmm10]
        movdqa  xmm11, [xDX + VMMR0JMPBUF.xmm11]
        movdqa  xmm12, [xDX + VMMR0JMPBUF.xmm12]
        movdqa  xmm13, [xDX + VMMR0JMPBUF.xmm13]
        movdqa  xmm14, [xDX + VMMR0JMPBUF.xmm14]
        movdqa  xmm15, [xDX + VMMR0JMPBUF.xmm15]
%endif
        mov     rbx, [xDX + VMMR0JMPBUF.rbx]
%ifdef ASM_CALL64_MSC
        mov     rsi, [xDX + VMMR0JMPBUF.rsi]
        mov     rdi, [xDX + VMMR0JMPBUF.rdi]
%endif
        mov     r12, [xDX + VMMR0JMPBUF.r12]
        mov     r13, [xDX + VMMR0JMPBUF.r13]
        mov     r14, [xDX + VMMR0JMPBUF.r14]
        mov     r15, [xDX + VMMR0JMPBUF.r15]
        mov     rbp, [xDX + VMMR0JMPBUF.rbp]
        and     qword [xDX + VMMR0JMPBUF.rip], byte 0 ; zero .rip to disarm the buffer (the validity check).
        mov     rsp, [xDX + VMMR0JMPBUF.rsp]
        push    qword [xDX + VMMR0JMPBUF.rflags]
        popf
        leave
        ret
ENDPROC vmmR0CallRing3SetJmp


;;
; Worker for VMMRZCallRing3.
; This will save the stack and registers.
;
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[ebp+0x08]  Pointer to the jump buffer.
; @param    rc       msc:rdx gcc:rsi x86:[ebp+0x0c]  The return code.
;
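;
; In rough C terms (an illustrative sketch only; helper names are made up):
;
;       int vmmR0CallRing3LongJmp(VMMR0JMPBUF *pJmpBuf, int rc)
;       {
;           if (!armed(pJmpBuf) || stack_too_far_away(pJmpBuf))
;               return VERR_VMM_LONG_JMP_ERROR;
;           mirror_stack_and_buffer(pJmpBuf);   /* diagnostic snapshots */
;           restore_context(pJmpBuf);           /* never returns: the matching   */
;       }                                       /* SetJmp call returns rc instead */
;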
BEGINPROC vmmR0CallRing3LongJmp
        ;
        ; Save the registers on the stack.
        ;
        push    rbp
        SEH64_PUSH_xBP
        mov     rbp, rsp
        SEH64_SET_FRAME_xBP 0
        push    r15
        SEH64_PUSH_GREG r15
        push    r14
        SEH64_PUSH_GREG r14
        push    r13
        SEH64_PUSH_GREG r13
        push    r12
        SEH64_PUSH_GREG r12
%ifdef ASM_CALL64_MSC
        push    rdi
        SEH64_PUSH_GREG rdi
        push    rsi
        SEH64_PUSH_GREG rsi
%endif
        push    rbx
        SEH64_PUSH_GREG rbx
        pushf
        SEH64_ALLOCATE_STACK 8
%ifdef RT_OS_WINDOWS
        sub     rsp, 0a0h
        SEH64_ALLOCATE_STACK 0a0h
        movdqa  [rsp + 000h], xmm6
        movdqa  [rsp + 010h], xmm7
        movdqa  [rsp + 020h], xmm8
        movdqa  [rsp + 030h], xmm9
        movdqa  [rsp + 040h], xmm10
        movdqa  [rsp + 050h], xmm11
        movdqa  [rsp + 060h], xmm12
        movdqa  [rsp + 070h], xmm13
        movdqa  [rsp + 080h], xmm14
        movdqa  [rsp + 090h], xmm15
%endif
SEH64_END_PROLOGUE

        ;
        ; Normalize the parameters.
        ;
%ifdef ASM_CALL64_MSC
        mov     eax, edx                        ; rc
        mov     rdx, rcx                        ; pJmpBuf
%else
        mov     rdx, rdi                        ; pJmpBuf
        mov     eax, esi                        ; rc
%endif

        ;
        ; Is the jump buffer armed?
        ;
        cmp     qword [xDX + VMMR0JMPBUF.rip], byte 0
        je      .nok
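        ; (.rip doubles as the armed flag: .proper_return in SetJmp zeroes it,
        ; so a long jump without a live, matching SetJmp frame is refused here.)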

        ;
        ; Also check that the stack is in the vicinity of the RSP we entered
        ; on so the stack mirroring below doesn't go wild.
        ;
        mov     rsi, rsp
        mov     rcx, [xDX + VMMR0JMPBUF.rsp]
        sub     rcx, rsi
        cmp     rcx, _64K
        jnbe    .nok
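        ; The unsigned compare does double duty; in C terms (sketch):
        ;     if ((uint64_t)(pJmpBuf->rsp - CurrentRsp) > _64K) goto nok;
        ; A current RSP above the saved one wraps the subtraction to a huge
        ; unsigned value, so "wrong side" fails the check just like "too far".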

        ;
        ; Save a PC and return PC here to assist unwinding.
        ;
.unwind_point:
        lea     rcx, [.unwind_point wrt RIP]
        mov     [xDX + VMMR0JMPBUF.UnwindPc], rcx
        mov     rcx, [xDX + VMMR0JMPBUF.rbp]
        lea     rcx, [rcx + 8]
        mov     [xDX + VMMR0JMPBUF.UnwindRetPcLocation], rcx
        mov     rcx, [rcx]
        mov     [xDX + VMMR0JMPBUF.UnwindRetPcValue], rcx

        ; Save RSP & RBP to enable stack dumps
        mov     [xDX + VMMR0JMPBUF.UnwindSp], rsp
        mov     rcx, rbp
        mov     [xDX + VMMR0JMPBUF.UnwindBp], rcx
        sub     rcx, 8
        mov     [xDX + VMMR0JMPBUF.UnwindRetSp], rcx

        ;
        ; Make sure the direction flag is clear before we do any rep movsb below.
        ;
        cld

        ;
        ; Mirror the stack.
        ;
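        ; In C terms the mirroring below is roughly (illustrative sketch; cb is
        ; the live stack size, clipped to a page boundary and the buffer size):
        ;     cb = RT_MIN((pJmpBuf->rsp - CurrentRsp) & ~(uint64_t)0xfff, pJmpBuf->cbStackBuf);
        ;     if (pJmpBuf->pvStackBuf && pJmpBuf->cbStackBuf)
        ;         memcpy(pJmpBuf->pvStackBuf, (void const *)CurrentRsp, cb);
        ;     pJmpBuf->cbStackValid = cb;       /* zero if the copy was skipped */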
        xor     ebx, ebx

        mov     rdi, [xDX + VMMR0JMPBUF.pvStackBuf]
        or      rdi, rdi
        jz      .skip_stack_mirroring

        mov     ebx, [xDX + VMMR0JMPBUF.cbStackBuf]
        or      ebx, ebx
        jz      .skip_stack_mirroring

        mov     rcx, [xDX + VMMR0JMPBUF.rsp]
        sub     rcx, rsp
        and     rcx, ~0fffh                     ; copy up to the page boundary

        cmp     rcx, rbx                        ; rbx = rcx = RT_MIN(rbx, rcx);
        jbe     .do_stack_buffer_big_enough
        mov     ecx, ebx                        ; too much to copy, limit to ebx
        jmp     .do_stack_copying
.do_stack_buffer_big_enough:
        mov     ebx, ecx                        ; ecx is smaller, update ebx for cbStackValid

.do_stack_copying:
        mov     rsi, rsp
        rep movsb

.skip_stack_mirroring:
        mov     [xDX + VMMR0JMPBUF.cbStackValid], ebx

        ;
        ; Do buffer mirroring.
        ;
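        ; Roughly (sketch): if (pJmpBuf->pMirrorBuf)
        ;     memcpy(pJmpBuf->pMirrorBuf, pJmpBuf, sizeof(VMMR0JMPBUF));
        ; i.e. a snapshot of the whole jump buffer, presumably for diagnostics.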
        mov     rdi, [xDX + VMMR0JMPBUF.pMirrorBuf]
        or      rdi, rdi
        jz      .skip_buffer_mirroring
        mov     rsi, rdx
        mov     ecx, VMMR0JMPBUF_size
        rep movsb
.skip_buffer_mirroring:

        ;
        ; Do the long jump.
        ;
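        ; Restoring RSP/RBP from the buffer and then doing leave/ret re-enters
        ; the frame captured by vmmR0CallRing3SetJmp and returns to *its*
        ; caller, with rc still in eax. On Windows the nonvolatile XMM
        ; registers are restored first, matching what SetJmp saved.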
%ifdef RT_OS_WINDOWS
        movdqa  xmm6,  [xDX + VMMR0JMPBUF.xmm6 ]
        movdqa  xmm7,  [xDX + VMMR0JMPBUF.xmm7 ]
        movdqa  xmm8,  [xDX + VMMR0JMPBUF.xmm8 ]
        movdqa  xmm9,  [xDX + VMMR0JMPBUF.xmm9 ]
        movdqa  xmm10, [xDX + VMMR0JMPBUF.xmm10]
        movdqa  xmm11, [xDX + VMMR0JMPBUF.xmm11]
        movdqa  xmm12, [xDX + VMMR0JMPBUF.xmm12]
        movdqa  xmm13, [xDX + VMMR0JMPBUF.xmm13]
        movdqa  xmm14, [xDX + VMMR0JMPBUF.xmm14]
        movdqa  xmm15, [xDX + VMMR0JMPBUF.xmm15]
%endif
        mov     rbx, [xDX + VMMR0JMPBUF.rbx]
%ifdef ASM_CALL64_MSC
        mov     rsi, [xDX + VMMR0JMPBUF.rsi]
        mov     rdi, [xDX + VMMR0JMPBUF.rdi]
%endif
        mov     r12, [xDX + VMMR0JMPBUF.r12]
        mov     r13, [xDX + VMMR0JMPBUF.r13]
        mov     r14, [xDX + VMMR0JMPBUF.r14]
        mov     r15, [xDX + VMMR0JMPBUF.r15]
        mov     rbp, [xDX + VMMR0JMPBUF.rbp]
        mov     rsp, [xDX + VMMR0JMPBUF.rsp]
        push    qword [xDX + VMMR0JMPBUF.rflags]
        popf
        leave
        ret

        ;
        ; Failure
        ;
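        ; Taken when the buffer is not armed or the stack-distance check failed:
        ; undo the prologue and return VERR_VMM_LONG_JMP_ERROR to the caller of
        ; vmmR0CallRing3LongJmp itself, the only case where this function returns.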
.nok:
        mov     eax, VERR_VMM_LONG_JMP_ERROR
%ifdef RT_OS_WINDOWS
        add     rsp, 0a0h                       ; skip XMM registers since they are unmodified.
%endif
        popf
        pop     rbx
%ifdef ASM_CALL64_MSC
        pop     rsi
        pop     rdi
%endif
        pop     r12
        pop     r13
        pop     r14
        pop     r15
        leave
        ret
ENDPROC vmmR0CallRing3LongJmp
