VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp@ 76553

Last change on this file since 76553 was 76553, checked in by vboxsync, 5 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 58.7 KB
RevLine 
[23]1/* $Id: PATMPatch.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
[1]2/** @file
3 * PATMPatch - Dynamic Guest OS Instruction patches
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
[76553]9 * Copyright (C) 2006-2019 Oracle Corporation
[1]10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
[5999]14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
[1]18 */
19
[57358]20
21/*********************************************************************************************************************************
22* Header Files *
23*********************************************************************************************************************************/
[1]24#define LOG_GROUP LOG_GROUP_PATM
[35346]25#include <VBox/vmm/patm.h>
[54763]26#include <VBox/vmm/pdmapi.h>
[35346]27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/cpum.h>
29#include <VBox/vmm/mm.h>
[45276]30#include <VBox/vmm/em.h>
[35346]31#include <VBox/vmm/trpm.h>
[46150]32#include <VBox/vmm/csam.h>
[1]33#include "PATMInternal.h"
[35346]34#include <VBox/vmm/vm.h>
[46150]35#include <VBox/param.h>
[1]36
37#include <VBox/err.h>
38#include <VBox/log.h>
[46150]39#include <VBox/dis.h>
40#include <VBox/disopcode.h>
41
[1]42#include <iprt/assert.h>
43#include <iprt/asm.h>
[23]44#include <iprt/string.h>
[1]45
46#include "PATMA.h"
47#include "PATMPatch.h"
48
[54688]49
[57358]50/*********************************************************************************************************************************
51* Structures and Typedefs *
52*********************************************************************************************************************************/
[54688]53/**
54 * Internal structure for passing more information about call fixups to
55 * patmPatchGenCode.
56 */
typedef struct
{
    RTRCPTR     pTargetGC;      /**< Guest address of the call/jump target; 0xDEADBEEF when the target is indirect/unknown. */
    RTRCPTR     pCurInstrGC;    /**< Guest address of the instruction currently being patched. */
    RTRCPTR     pNextInstrGC;   /**< Guest address of the instruction following the patched one (may be 0, see STI handling). */
    RTRCPTR     pReturnGC;      /**< Guest address the generated call returns to (instruction after the call). */
} PATMCALLINFO, *PPATMCALLINFO;
64
[54688]65
[57358]66/*********************************************************************************************************************************
67* Defined Constants And Macros *
68*********************************************************************************************************************************/
/** Value to use when not sure about the patch size. */
#define PATCHGEN_DEF_SIZE   256

/**
 * Patch generator prologue without the local variable definitions.
 *
 * Verifies that @a a_cbMaxEmit bytes (plus a 16 byte fudge for a trailing
 * jump) still fit into the patch memory block and points pPB at the current
 * emit position.  On overflow it sets the out-of-memory flag and makes the
 * enclosing function return VERR_NO_MEMORY.
 *
 * NOTE(review): requires locals pPB and cbGivenPatchSize to already be in
 * scope — normally introduced by PATCHGEN_PROLOG below; use this variant only
 * for a second emit section within the same function.
 */
#define PATCHGEN_PROLOG_NODEF(pVM, pPatch, a_cbMaxEmit) \
    do { \
        cbGivenPatchSize = (a_cbMaxEmit) + 16U /*jmp++*/; \
        if (RT_LIKELY((pPatch)->pPatchBlockOffset + pPatch->uCurPatchOffset + cbGivenPatchSize < pVM->patm.s.cbPatchMem)) \
            pPB = PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset; \
        else \
        { \
            pVM->patm.s.fOutOfMemory = true; \
            AssertMsgFailed(("offPatch=%#x + offEmit=%#x + a_cbMaxEmit=%#x + jmp --> cbTotalWithFudge=%#x >= cbPatchMem=%#x", \
                             (pPatch)->pPatchBlockOffset, pPatch->uCurPatchOffset, a_cbMaxEmit, \
                             (pPatch)->pPatchBlockOffset + pPatch->uCurPatchOffset + cbGivenPatchSize, pVM->patm.s.cbPatchMem)); \
            return VERR_NO_MEMORY; \
        } \
    } while (0)

/**
 * Patch generator prologue: declares the emit pointer (pPB) and budget
 * (cbGivenPatchSize) locals and performs the space check above.
 */
#define PATCHGEN_PROLOG(pVM, pPatch, a_cbMaxEmit) \
    uint8_t *pPB; \
    uint32_t cbGivenPatchSize; \
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, a_cbMaxEmit)

/**
 * Patch generator epilogue: asserts the emitted size stayed within the budget
 * declared in the prologue and advances the patch write offset.
 */
#define PATCHGEN_EPILOG(pPatch, a_cbActual) \
    do { \
        AssertMsg((a_cbActual) <= cbGivenPatchSize, ("a_cbActual=%#x cbGivenPatchSize=%#x\n", a_cbActual, cbGivenPatchSize)); \
        Assert((a_cbActual) <= 640); \
        pPatch->uCurPatchOffset += (a_cbActual); \
    } while (0)
98
99
100
101
102int patmPatchAddReloc32(PVM pVM, PPATCHINFO pPatch, uint8_t *pRelocHC, uint32_t uType,
103 RTRCPTR pSource /*= 0*/, RTRCPTR pDest /*= 0*/)
[1]104{
105 PRELOCREC pRec;
106
[54714]107 Assert( uType == FIXUP_ABSOLUTE
[54763]108 || ( ( uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL
109 || uType == FIXUP_CONSTANT_IN_PATCH_ASM_TMPL
110 || uType == FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL)
[54762]111 && pSource == pDest
[54764]112 && PATM_IS_ASMFIX(pSource))
[54714]113 || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
[1]114
[13822]115 LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%RRv source=%RRv dest=%RRv\n", uType, pRelocHC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemGC , pSource, pDest));
[1]116
117 pRec = (PRELOCREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
118 Assert(pRec);
119 pRec->Core.Key = (AVLPVKEY)pRelocHC;
[63560]120 pRec->pRelocPos = pRelocHC; /** @todo redundant. */
[1]121 pRec->pSource = pSource;
122 pRec->pDest = pDest;
123 pRec->uType = uType;
124
125 bool ret = RTAvlPVInsert(&pPatch->FixupTree, &pRec->Core);
126 Assert(ret); NOREF(ret);
127 pPatch->nrFixups++;
128
129 return VINF_SUCCESS;
130}
131
[9228]132int patmPatchAddJump(PVM pVM, PPATCHINFO pPatch, uint8_t *pJumpHC, uint32_t offset, RTRCPTR pTargetGC, uint32_t opcode)
[1]133{
134 PJUMPREC pRec;
135
136 pRec = (PJUMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
137 Assert(pRec);
138
139 pRec->Core.Key = (AVLPVKEY)pJumpHC;
[63560]140 pRec->pJumpHC = pJumpHC; /** @todo redundant. */
[1]141 pRec->offDispl = offset;
142 pRec->pTargetGC = pTargetGC;
143 pRec->opcode = opcode;
144
145 bool ret = RTAvlPVInsert(&pPatch->JumpTree, &pRec->Core);
146 Assert(ret); NOREF(ret);
147 pPatch->nrJumpRecs++;
148
149 return VINF_SUCCESS;
150}
151
/**
 * Instantiates a patch assembly template into the patch block and resolves
 * every relocation marker embedded in the template code.
 *
 * @returns Number of bytes actually emitted (template size, minus the trailing
 *          near jump when the template has one but @a fGenJump is false).
 * @param   pVM             The cross context VM structure.
 * @param   pPatch          Patch record being generated.
 * @param   pPB             Host context write position in the patch block.
 * @param   pAsmRecord      Assembly template descriptor (code bytes + reloc list).
 * @param   pReturnAddrGC   Guest address to jump back to; only used when
 *                          @a fGenJump is true.
 * @param   fGenJump        Whether to wire up the template's trailing jump
 *                          back to guest code.
 * @param   pCallInfo       Optional extra addresses for call/jump fixups.
 */
static uint32_t patmPatchGenCode(PVM pVM, PPATCHINFO pPatch, uint8_t *pPB, PCPATCHASMRECORD pAsmRecord,
                                 RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fGenJump,
                                 PPATMCALLINFO pCallInfo = 0)
{
    Assert(fGenJump == false || pReturnAddrGC);
    Assert(fGenJump == false || pAsmRecord->offJump);
    Assert(pAsmRecord);
    Assert(pAsmRecord->cbFunction > sizeof(pAsmRecord->aRelocs[0].uType) * pAsmRecord->cRelocs);

    // Copy the code block
    memcpy(pPB, pAsmRecord->pbFunction, pAsmRecord->cbFunction);

    /*
     * Process all fixups.  The relocs are listed in the order their magic
     * markers appear in the template code, so j scans forward only and
     * resumes where the previous marker was found.
     */
    uint32_t i, j;
    for (j = 0, i = 0; i < pAsmRecord->cRelocs; i++)
    {
        for (; j < pAsmRecord->cbFunction; j++)
        {
            if (*(uint32_t*)&pPB[j] == pAsmRecord->aRelocs[i].uType)
            {
                RCPTRTYPE(uint32_t *) dest;

#ifdef VBOX_STRICT
                if (pAsmRecord->aRelocs[i].uType == PATM_ASMFIX_FIXUP)
                    Assert(pAsmRecord->aRelocs[i].uInfo != 0);
                else
                    Assert(pAsmRecord->aRelocs[i].uInfo == 0);
#endif

                /*
                 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING
                 * A SAVED STATE WITH A DIFFERENT HYPERVISOR LAYOUT.
                 */
                uint32_t uRelocType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
                switch (pAsmRecord->aRelocs[i].uType)
                {
                    /*
                     * PATMGCSTATE member fixups.
                     */
                    case PATM_ASMFIX_VMFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
                        break;
                    case PATM_ASMFIX_PENDINGACTION:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
                        break;
                    case PATM_ASMFIX_STACKPTR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
                        break;
                    case PATM_ASMFIX_INTERRUPTFLAG:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
                        break;
                    case PATM_ASMFIX_INHIBITIRQADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
                        break;
                    case PATM_ASMFIX_TEMP_EAX:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
                        break;
                    case PATM_ASMFIX_TEMP_ECX:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
                        break;
                    case PATM_ASMFIX_TEMP_EDI:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
                        break;
                    case PATM_ASMFIX_TEMP_EFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
                        break;
                    case PATM_ASMFIX_TEMP_RESTORE_FLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
                        break;
                    case PATM_ASMFIX_CALL_PATCH_TARGET_ADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
                        break;
                    case PATM_ASMFIX_CALL_RETURN_ADDR:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
                        break;
#ifdef VBOX_WITH_STATISTICS
                    case PATM_ASMFIX_ALLPATCHCALLS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
                        break;
                    case PATM_ASMFIX_IRETEFLAGS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
                        break;
                    case PATM_ASMFIX_IRETCS:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
                        break;
                    case PATM_ASMFIX_IRETEIP:
                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
                        break;
#endif


                    case PATM_ASMFIX_FIXUP:
                        /* Offset in aRelocs[i].uInfo is from the base of the function. */
                        dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->aRelocs[i].uInfo
                             + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
                        break;

#ifdef VBOX_WITH_STATISTICS
                    case PATM_ASMFIX_PERPATCHCALLS:
                        dest = patmPatchQueryStatAddress(pVM, pPatch);
                        break;
#endif

                    /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
                     * part to store the original return addresses.
                     */
                    case PATM_ASMFIX_STACKBASE:
                        dest = pVM->patm.s.pGCStackGC;
                        break;

                    case PATM_ASMFIX_STACKBASE_GUEST:
                        dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
                        break;

                    case PATM_ASMFIX_RETURNADDR:   /* absolute guest address; no fixup required */
                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_ASMFIX_NO_FIXUP);
                        dest = pCallInfo->pReturnGC;
                        break;

                    case PATM_ASMFIX_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_ASMFIX_NO_FIXUP);

                        /** @note hardcoded assumption that we must return to the instruction following this block */
                        dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->cbFunction;
                        break;

                    case PATM_ASMFIX_CALLTARGET:   /* relative to patch address; no fixup required */
                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_ASMFIX_NO_FIXUP);

                        /* Address must be filled in later. (see patmr3SetBranchTargets)  */
                        patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
                        dest = PATM_ILLEGAL_DESTINATION;
                        break;

                    case PATM_ASMFIX_PATCHBASE:    /* Patch GC base address */
                        dest = pVM->patm.s.pPatchMemGC;
                        break;

                    case PATM_ASMFIX_NEXTINSTRADDR:
                        Assert(pCallInfo);
                        /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
                        dest = pCallInfo->pNextInstrGC;
                        break;

                    case PATM_ASMFIX_CURINSTRADDR:
                        Assert(pCallInfo);
                        dest = pCallInfo->pCurInstrGC;
                        break;

                    /* Relative address of global patm lookup and call function. */
                    case PATM_ASMFIX_LOOKUP_AND_CALL_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperCallGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_ASMFIX_RETURN_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperRetGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_ASMFIX_IRET_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperIretGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_ASMFIX_LOOKUP_AND_JUMP_FUNCTION:
                    {
                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
                        Assert(pVM->patm.s.pfnHelperJumpGC);
                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));

                        /* Relative value is target minus address of instruction after the actual call instruction. */
                        dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
                        break;
                    }

                    case PATM_ASMFIX_CPUID_STD_MAX: /* saved state only */
                        dest = CPUMR3GetGuestCpuIdPatmStdMax(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_EXT_MAX: /* saved state only */
                        dest = CPUMR3GetGuestCpuIdPatmExtMax(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_CENTAUR_MAX: /* saved state only */
                        dest = CPUMR3GetGuestCpuIdPatmCentaurMax(pVM);
                        break;

                    /*
                     * The following fixups needs to be recalculated when loading saved state
                     * Note! Earlier saved state versions had different hacks for detecting some of these.
                     */
                    case PATM_ASMFIX_VM_FORCEDACTIONS:
                        dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
                        break;

                    case PATM_ASMFIX_CPUID_DEF_PTR: /* saved state only */
                        dest = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_STD_PTR: /* saved state only */
                        dest = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_EXT_PTR: /* saved state only */
                        dest = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
                        break;
                    case PATM_ASMFIX_CPUID_CENTAUR_PTR: /* saved state only */
                        dest = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
                        break;

                    /*
                     * The following fixups are constants and helper code calls that only
                     * needs to be corrected when loading saved state.
                     */
                    case PATM_ASMFIX_HELPER_CPUM_CPUID:
                    {
                        int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "CPUMPatchHlpCpuId", &dest);
                        AssertReleaseRCBreakStmt(rc, dest = PATM_ILLEGAL_DESTINATION);
                        uRelocType = FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL;
                        break;
                    }

                    /*
                     * Unknown fixup.
                     */
                    case PATM_ASMFIX_REUSE_LATER_0:
                    case PATM_ASMFIX_REUSE_LATER_1:
                    case PATM_ASMFIX_REUSE_LATER_2:
                    case PATM_ASMFIX_REUSE_LATER_3:
                    default:
                        AssertReleaseMsgFailed(("Unknown fixup: %#x\n", pAsmRecord->aRelocs[i].uType));
                        dest = PATM_ILLEGAL_DESTINATION;
                        break;
                }

                /* Helper calls are emitted as relative: convert the absolute
                   symbol address into a displacement from the byte following
                   the 32-bit operand. */
                if (uRelocType == FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL)
                {
                    RTRCUINTPTR RCPtrAfter = pVM->patm.s.pPatchMemGC
                                           + (RTRCUINTPTR)(&pPB[j + sizeof(RTRCPTR)] - pVM->patm.s.pPatchMemHC);
                    dest -= RCPtrAfter;
                }

                *(PRTRCPTR)&pPB[j] = dest;

                /* Only marker values below PATM_ASMFIX_NO_FIXUP get a reloc
                   record for saved-state relocation. */
                if (pAsmRecord->aRelocs[i].uType < PATM_ASMFIX_NO_FIXUP)
                {
                    patmPatchAddReloc32(pVM, pPatch, &pPB[j], uRelocType,
                                        pAsmRecord->aRelocs[i].uType /*pSources*/, pAsmRecord->aRelocs[i].uType /*pDest*/);
                }
                break;
            }
        }
        Assert(j < pAsmRecord->cbFunction); /* marker for reloc i must have been found */
    }
    Assert(pAsmRecord->aRelocs[i].uInfo == 0xffffffff); /* terminator entry expected after the last reloc */

    /* Add the jump back to guest code (if required) */
    if (fGenJump)
    {
        int32_t displ = pReturnAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32);

        /* Add lookup record for patch to guest address translation */
        Assert(pPB[pAsmRecord->offJump - 1] == 0xE9); /* must be a near jmp rel32 */
        patmR3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);

        *(uint32_t *)&pPB[pAsmRecord->offJump] = displ;
        patmPatchAddReloc32(pVM, pPatch, &pPB[pAsmRecord->offJump], FIXUP_REL_JMPTOGUEST,
                            PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32,
                            pReturnAddrGC);
    }

    // Calculate the right size of this patch block
    if ((fGenJump && pAsmRecord->offJump) || (!fGenJump && !pAsmRecord->offJump))
        return pAsmRecord->cbFunction;
    // if a jump instruction is present and we don't want one, then subtract SIZEOF_NEARJUMP32
    return pAsmRecord->cbFunction - SIZEOF_NEARJUMP32;
}
448
449/* Read bytes and check for overwritten instructions. */
[9228]450static int patmPatchReadBytes(PVM pVM, uint8_t *pDest, RTRCPTR pSrc, uint32_t cb)
[1]451{
[18927]452 int rc = PGMPhysSimpleReadGCPtr(&pVM->aCpus[0], pDest, pSrc, cb);
[1]453 AssertRCReturn(rc, rc);
454 /*
455 * Could be patched already; make sure this is checked!
456 */
457 for (uint32_t i=0;i<cb;i++)
458 {
459 uint8_t temp;
460
461 int rc2 = PATMR3QueryOpcode(pVM, pSrc+i, &temp);
[13816]462 if (RT_SUCCESS(rc2))
[1]463 {
464 pDest[i] = temp;
465 }
466 else
467 break; /* no more */
468 }
469 return VINF_SUCCESS;
470}
471
[9212]472int patmPatchGenDuplicate(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
[1]473{
[54746]474 uint32_t const cbInstrShutUpGcc = pCpu->cbInstr;
475 PATCHGEN_PROLOG(pVM, pPatch, cbInstrShutUpGcc);
[1]476
[54746]477 int rc = patmPatchReadBytes(pVM, pPB, pCurInstrGC, cbInstrShutUpGcc);
[1]478 AssertRC(rc);
[41727]479 PATCHGEN_EPILOG(pPatch, cbInstrShutUpGcc);
[1]480 return rc;
481}
482
[9228]483int patmPatchGenIret(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, bool fSizeOverride)
[1]484{
485 uint32_t size;
486 PATMCALLINFO callInfo;
[54746]487 PCPATCHASMRECORD pPatchAsmRec = EMIsRawRing1Enabled(pVM) ? &g_patmIretRing1Record : &g_patmIretRecord;
[1]488
[54746]489 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
[1]490
[62652]491 AssertMsg(fSizeOverride == false, ("operand size override!!\n")); RT_NOREF_PV(fSizeOverride);
[1]492 callInfo.pCurInstrGC = pCurInstrGC;
493
[54746]494 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false, &callInfo);
[1]495
496 PATCHGEN_EPILOG(pPatch, size);
497 return VINF_SUCCESS;
498}
499
500int patmPatchGenCli(PVM pVM, PPATCHINFO pPatch)
501{
502 uint32_t size;
[54746]503 PATCHGEN_PROLOG(pVM, pPatch, g_patmCliRecord.cbFunction);
[1]504
[54687]505 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmCliRecord, 0, false);
[1]506
507 PATCHGEN_EPILOG(pPatch, size);
508 return VINF_SUCCESS;
509}
510
511/*
512 * Generate an STI patch
513 */
[9228]514int patmPatchGenSti(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RTRCPTR pNextInstrGC)
[1]515{
516 PATMCALLINFO callInfo;
517 uint32_t size;
518
[62652]519 Log(("patmPatchGenSti at %RRv; next %RRv\n", pCurInstrGC, pNextInstrGC)); RT_NOREF_PV(pCurInstrGC);
[54746]520 PATCHGEN_PROLOG(pVM, pPatch, g_patmStiRecord.cbFunction);
[1]521 callInfo.pNextInstrGC = pNextInstrGC;
[54687]522 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmStiRecord, 0, false, &callInfo);
[1]523 PATCHGEN_EPILOG(pPatch, size);
524
525 return VINF_SUCCESS;
526}
527
528
[9212]529int patmPatchGenPopf(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fSizeOverride, bool fGenJumpBack)
[1]530{
531 uint32_t size;
532 PATMCALLINFO callInfo;
[54746]533 PCPATCHASMRECORD pPatchAsmRec;
534 if (fSizeOverride == true)
535 pPatchAsmRec = fGenJumpBack ? &g_patmPopf16Record : &g_patmPopf16Record_NoExit;
536 else
537 pPatchAsmRec = fGenJumpBack ? &g_patmPopf32Record : &g_patmPopf32Record_NoExit;
[1]538
[54746]539 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
[1]540
541 callInfo.pNextInstrGC = pReturnAddrGC;
542
[13822]543 Log(("patmPatchGenPopf at %RRv\n", pReturnAddrGC));
[1]544
[4057]545 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
[1]546 if (fSizeOverride == true)
547 Log(("operand size override!!\n"));
[54746]548 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, pReturnAddrGC, fGenJumpBack, &callInfo);
[1]549
550 PATCHGEN_EPILOG(pPatch, size);
551 STAM_COUNTER_INC(&pVM->patm.s.StatGenPopf);
552 return VINF_SUCCESS;
553}
554
555int patmPatchGenPushf(PVM pVM, PPATCHINFO pPatch, bool fSizeOverride)
556{
557 uint32_t size;
[54746]558 PCPATCHASMRECORD pPatchAsmRec = fSizeOverride == true ? &g_patmPushf16Record : &g_patmPushf32Record;
559 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
[1]560
[54746]561 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);
[1]562
563 PATCHGEN_EPILOG(pPatch, size);
564 return VINF_SUCCESS;
565}
566
567int patmPatchGenPushCS(PVM pVM, PPATCHINFO pPatch)
568{
569 uint32_t size;
[54746]570 PATCHGEN_PROLOG(pVM, pPatch, g_patmPushCSRecord.cbFunction);
[54687]571 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmPushCSRecord, 0, false);
[1]572 PATCHGEN_EPILOG(pPatch, size);
573 return VINF_SUCCESS;
574}
575
[9212]576int patmPatchGenLoop(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
[1]577{
578 uint32_t size = 0;
[54686]579 PCPATCHASMRECORD pPatchAsmRec;
[1]580 switch (opcode)
581 {
582 case OP_LOOP:
[54687]583 pPatchAsmRec = &g_patmLoopRecord;
[1]584 break;
585 case OP_LOOPNE:
[54687]586 pPatchAsmRec = &g_patmLoopNZRecord;
[1]587 break;
588 case OP_LOOPE:
[54687]589 pPatchAsmRec = &g_patmLoopZRecord;
[1]590 break;
591 case OP_JECXZ:
[54687]592 pPatchAsmRec = &g_patmJEcxRecord;
[1]593 break;
594 default:
595 AssertMsgFailed(("PatchGenLoop: invalid opcode %d\n", opcode));
596 return VERR_INVALID_PARAMETER;
597 }
598 Assert(pPatchAsmRec->offSizeOverride && pPatchAsmRec->offRelJump);
599
[54746]600 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
[1]601 Log(("PatchGenLoop %d jump %d to %08x offrel=%d\n", opcode, pPatch->nrJumpRecs, pTargetGC, pPatchAsmRec->offRelJump));
602
603 // Generate the patch code
604 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);
605
606 if (fSizeOverride)
607 {
608 pPB[pPatchAsmRec->offSizeOverride] = 0x66; // ecx -> cx or vice versa
609 }
610
[9228]611 *(RTRCPTR *)&pPB[pPatchAsmRec->offRelJump] = 0xDEADBEEF;
[1]612
613 patmPatchAddJump(pVM, pPatch, &pPB[pPatchAsmRec->offRelJump - 1], 1, pTargetGC, opcode);
614
615 PATCHGEN_EPILOG(pPatch, size);
616 return VINF_SUCCESS;
617}
618
/**
 * Emits a relative (conditional) jump from patch code to patch code; the
 * displacement is filled in later through the jump record.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pPatch          Patch record.
 * @param   pTargetGC       Guest target address (must not be a patch address).
 * @param   opcode          OP_J* / OP_JMP / loop opcode.
 * @param   fSizeOverride   Operand size override (only relevant for the loop
 *                          opcodes, which are forwarded to patmPatchGenLoop).
 */
int patmPatchGenRelJump(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
{
    uint32_t offset = 0;
    PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);

    // internal relative jumps from patch code to patch code; no relocation record required

    Assert(PATMIsPatchGCAddr(pVM, pTargetGC) == false);

    /* Select the second opcode byte of the two-byte Jcc rel32 encoding (0x0F 0x8x). */
    switch (opcode)
    {
    case OP_JO:
        pPB[1] = 0x80;
        break;
    case OP_JNO:
        pPB[1] = 0x81;
        break;
    case OP_JC:
        pPB[1] = 0x82;
        break;
    case OP_JNC:
        pPB[1] = 0x83;
        break;
    case OP_JE:
        pPB[1] = 0x84;
        break;
    case OP_JNE:
        pPB[1] = 0x85;
        break;
    case OP_JBE:
        pPB[1] = 0x86;
        break;
    case OP_JNBE:
        pPB[1] = 0x87;
        break;
    case OP_JS:
        pPB[1] = 0x88;
        break;
    case OP_JNS:
        pPB[1] = 0x89;
        break;
    case OP_JP:
        pPB[1] = 0x8A;
        break;
    case OP_JNP:
        pPB[1] = 0x8B;
        break;
    case OP_JL:
        pPB[1] = 0x8C;
        break;
    case OP_JNL:
        pPB[1] = 0x8D;
        break;
    case OP_JLE:
        pPB[1] = 0x8E;
        break;
    case OP_JNLE:
        pPB[1] = 0x8F;
        break;

    case OP_JMP:
        /* If interrupted here, then jump to the target instruction. Used by PATM.cpp for jumping to known instructions. */
        /* Add lookup record for patch to guest address translation */
        patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pTargetGC, PATM_LOOKUP_PATCH2GUEST);

        pPB[0] = 0xE9;  /* jmp rel32 */
        break;

    case OP_JECXZ:
    case OP_LOOP:
    case OP_LOOPNE:
    case OP_LOOPE:
        /* These have no rel32 form; emit via the loop templates instead. */
        return patmPatchGenLoop(pVM, pPatch, pTargetGC, opcode, fSizeOverride);

    default:
        AssertMsg(0, ("Invalid jump opcode %d\n", opcode));
        return VERR_PATCHING_REFUSED;
    }
    if (opcode != OP_JMP)
    {
        pPB[0] = 0xF;   /* two-byte Jcc: 0x0F prefix + condition byte set above */
        offset += 2;
    }
    else offset++;

    /* Placeholder displacement; resolved later through the jump record. */
    *(RTRCPTR *)&pPB[offset] = 0xDEADBEEF;

    patmPatchAddJump(pVM, pPatch, pPB, offset, pTargetGC, opcode);

    offset += sizeof(RTRCPTR);

    PATCHGEN_EPILOG(pPatch, offset);
    return VINF_SUCCESS;
}
713
/**
 * Rewrite call to dynamic or currently unknown function (on-demand patching of
 * function).
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pPatch          Patch record.
 * @param   pCpu            Disassembly state of the call instruction.
 * @param   pCurInstrGC     Guest address of the call instruction.
 * @param   pTargetGC       Guest call target (ignored for indirect calls).
 * @param   fIndirect       Whether this is an indirect (r/m operand) call.
 */
int patmPatchGenCall(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC, RTRCPTR pTargetGC, bool fIndirect)
{
    PATMCALLINFO callInfo;
    uint32_t offset;
    uint32_t i, size;
    int rc;

    /** @note Don't check for IF=1 here. The ret instruction will do this. */
    /** @note It's dangerous to do this for 'normal' patches. the jump target might be inside the generated patch jump. (seen this!) */

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    if (fIndirect)
    {
        Log(("patmPatchGenIndirectCall\n"));
        Assert(pCpu->Param1.cb == 4);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J);

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        /* include prefix byte to make sure we don't use the incorrect selector register. */
        if (pCpu->fPrefix & DISPREFIX_SEG)
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        pPB[offset++] = 0xFF;              // push r/m32
        /* Reuse the original ModRM but with reg=6 (group 5 /6 = push). */
        pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
        i = 2;  /* standard offset of modrm bytes */
        if (pCpu->fPrefix & DISPREFIX_OPSIZE)
            i++;    //skip operand prefix
        if (pCpu->fPrefix & DISPREFIX_SEG)
            i++;    //skip segment prefix

        /* Copy the remaining operand bytes (SIB/displacement) from the guest instruction. */
        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->cbInstr - i);
    }
    else
    {
        AssertMsg(PATMIsPatchGCAddr(pVM, pTargetGC) == false, ("Target is already a patch address (%RRv)?!?\n", pTargetGC));
        Assert(pTargetGC);
        Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J);

        /** @todo wasting memory as the complex search is overkill and we need only one lookup slot... */

        /* Relative call to patch code (patch to patch -> no fixup). */
        Log(("PatchGenCall from %RRv (next=%RRv) to %RRv\n", pCurInstrGC, pCurInstrGC + pCpu->cbInstr, pTargetGC));

        /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
         * a page fault. The assembly code restores the stack afterwards.
         */
        offset = 0;
        pPB[offset++] = 0x68;              // push %Iv
        *(RTRCPTR *)&pPB[offset] = pTargetGC;
        offset += sizeof(RTRCPTR);
    }

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i=0;i<size;i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PCPATCHASMRECORD pPatchAsmRec = fIndirect ? &g_patmCallIndirectRecord : &g_patmCallRecord;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, pPatchAsmRec->cbFunction);
    callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;
    callInfo.pTargetGC = (fIndirect) ? 0xDEADBEEF : pTargetGC;
    size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    /* Need to set PATM_ASMFIX_INTERRUPTFLAG after the patched ret returns here. */
    rc = patmPatchGenSetPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenCall);
    return VINF_SUCCESS;
}
809
/**
 * Generate indirect jump to unknown destination.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Current instruction address
 */
int patmPatchGenJump(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    PATMCALLINFO callInfo;
    uint32_t offset;
    uint32_t i, size;
    int rc;

    /* 1: Clear PATM interrupt flag on entry. */
    rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
    if (rc == VERR_NO_MEMORY)
        return rc;
    AssertRCReturn(rc, rc);

    PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);
    /* 2: We must push the target address onto the stack before appending the indirect call code. */

    Log(("patmPatchGenIndirectJump\n"));
    Assert(pCpu->Param1.cb == 4);
    Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J);

    /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
     * a page fault. The assembly code restores the stack afterwards.
     */
    offset = 0;
    /* include prefix byte to make sure we don't use the incorrect selector register. */
    if (pCpu->fPrefix & DISPREFIX_SEG)
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);

    pPB[offset++] = 0xFF;              // push r/m32
    /* Reuse the original ModRM but with reg=6 (group 5 /6 = push). */
    pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
    i = 2;  /* standard offset of modrm bytes */
    if (pCpu->fPrefix & DISPREFIX_OPSIZE)
        i++;    //skip operand prefix
    if (pCpu->fPrefix & DISPREFIX_SEG)
        i++;    //skip segment prefix

    /* Copy the remaining operand bytes (SIB/displacement) from the guest instruction. */
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->cbInstr - i);

    /* align this block properly to make sure the jump table will not be misaligned. */
    size = (RTHCUINTPTR)&pPB[offset] & 3;
    if (size)
        size = 4 - size;

    for (i=0;i<size;i++)
    {
        pPB[offset++] = 0x90;   /* nop */
    }
    PATCHGEN_EPILOG(pPatch, offset);

    /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmJumpIndirectRecord.cbFunction);
    callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;
    callInfo.pTargetGC = 0xDEADBEEF;
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmJumpIndirectRecord, 0, false, &callInfo);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenJump);
    return VINF_SUCCESS;
}
880
/**
 * Generate return instruction
 *
 * Emits a PATM-aware replacement for a guest ret/retn: an IF check that
 * resumes at the original instruction when interrupts are enabled, alignment
 * nops, the shared ret helper code, and finally a duplicate of the guest's
 * ret instruction (which will pop the PATM-provided return address).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pPatch      Patch structure
 * @param   pCpu        Disassembly struct
 * @param   pCurInstrGC Current instruction pointer
 *
 */
int patmPatchGenRet(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
{
    RTRCPTR pPatchRetInstrGC;

    /* Remember start of this patch for below. */
    pPatchRetInstrGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;

    Log(("patmPatchGenRet %RRv\n", pCurInstrGC));

    /** @note optimization: multiple identical ret instruction in a single patch can share a single patched ret. */
    if (    pPatch->pTempInfo->pPatchRetInstrGC
        &&  pPatch->pTempInfo->uPatchRetParam1 == (uint32_t)pCpu->Param1.uValue) /* nr of bytes popped off the stack should be identical of course! */
    {
        Assert(pCpu->pCurInstr->uOpcode == OP_RETN);
        STAM_COUNTER_INC(&pVM->patm.s.StatGenRetReused);

        /* Reuse the previously generated ret sequence; just jump to it. */
        return patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, pPatch->pTempInfo->pPatchRetInstrGC);
    }

    /* Jump back to the original instruction if IF is set again. */
    Assert(!patmFindActivePatchByEntrypoint(pVM, pCurInstrGC));
    int rc = patmPatchGenCheckIF(pVM, pPatch, pCurInstrGC);
    AssertRCReturn(rc, rc);

    /* align this block properly to make sure the jump table will not be misaligned. */
    PATCHGEN_PROLOG(pVM, pPatch, 4);
    uint32_t size = (RTHCUINTPTR)pPB & 3;
    if (size)
        size = 4 - size;   /* number of nop pad bytes up to the next 4-byte boundary */

    for (uint32_t i = 0; i < size; i++)
        pPB[i] = 0x90; /* nop */
    PATCHGEN_EPILOG(pPatch, size);

    /* Emit the shared ret helper code. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmRetRecord.cbFunction);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmRetRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    STAM_COUNTER_INC(&pVM->patm.s.StatGenRet);
    /* Duplicate the ret or ret n instruction; it will use the PATM return address */
    rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);

    if (rc == VINF_SUCCESS)
    {
        /* Cache this sequence so identical rets in the same patch can share it (see above). */
        pPatch->pTempInfo->pPatchRetInstrGC = pPatchRetInstrGC;
        pPatch->pTempInfo->uPatchRetParam1  = pCpu->Param1.uValue;
    }
    return rc;
}
940
/**
 * Generate all global patm functions
 *
 * Emits the shared helper code blobs (lookup-and-call, ret, lookup-and-jump,
 * iret) once into patch memory and records their guest-context entry points
 * in the VM structure for use by all subsequently generated patches.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pPatch      Patch structure
 *
 */
int patmPatchGenGlobalFunctions(PVM pVM, PPATCHINFO pPatch)
{
    pVM->patm.s.pfnHelperCallGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG(pVM, pPatch, g_patmLookupAndCallRecord.cbFunction);
    uint32_t size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmLookupAndCallRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    pVM->patm.s.pfnHelperRetGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmRetFunctionRecord.cbFunction);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmRetFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    pVM->patm.s.pfnHelperJumpGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmLookupAndJumpRecord.cbFunction);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmLookupAndJumpRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* Round to next 8 byte boundary. */
    pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);

    pVM->patm.s.pfnHelperIretGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmIretFunctionRecord.cbFunction);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmIretFunctionRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    Log(("pfnHelperCallGC %RRv\n", pVM->patm.s.pfnHelperCallGC));
    Log(("pfnHelperRetGC %RRv\n", pVM->patm.s.pfnHelperRetGC));
    Log(("pfnHelperJumpGC %RRv\n", pVM->patm.s.pfnHelperJumpGC));
    Log(("pfnHelperIretGC %RRv\n", pVM->patm.s.pfnHelperIretGC));

    return VINF_SUCCESS;
}
987
988/**
989 * Generate illegal instruction (int 3)
990 *
991 * @returns VBox status code.
[58122]992 * @param pVM The cross context VM structure.
[1]993 * @param pPatch Patch structure
994 *
995 */
996int patmPatchGenIllegalInstr(PVM pVM, PPATCHINFO pPatch)
997{
[54746]998 PATCHGEN_PROLOG(pVM, pPatch, 1);
[1]999
1000 pPB[0] = 0xCC;
1001
1002 PATCHGEN_EPILOG(pPatch, 1);
1003 return VINF_SUCCESS;
1004}
1005
1006/**
1007 * Check virtual IF flag and jump back to original guest code if set
1008 *
1009 * @returns VBox status code.
[58122]1010 * @param pVM The cross context VM structure.
[1]1011 * @param pPatch Patch structure
1012 * @param pCurInstrGC Guest context pointer to the current instruction
1013 *
1014 */
[9228]1015int patmPatchGenCheckIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
[1]1016{
1017 uint32_t size;
1018
[54746]1019 PATCHGEN_PROLOG(pVM, pPatch, g_patmCheckIFRecord.cbFunction);
[1]1020
1021 /* Add lookup record for patch to guest address translation */
[44362]1022 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
[1]1023
1024 /* Generate code to check for IF=1 before executing the call to the duplicated function. */
[54687]1025 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmCheckIFRecord, pCurInstrGC, true);
[1]1026
1027 PATCHGEN_EPILOG(pPatch, size);
1028 return VINF_SUCCESS;
1029}
1030
1031/**
1032 * Set PATM interrupt flag
1033 *
1034 * @returns VBox status code.
[58122]1035 * @param pVM The cross context VM structure.
[1]1036 * @param pPatch Patch structure
1037 * @param pInstrGC Corresponding guest instruction
1038 *
1039 */
[9228]1040int patmPatchGenSetPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
[1]1041{
[54746]1042 PATCHGEN_PROLOG(pVM, pPatch, g_patmSetPIFRecord.cbFunction);
[1]1043
1044 /* Add lookup record for patch to guest address translation */
[44362]1045 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
[1]1046
[54746]1047 uint32_t size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmSetPIFRecord, 0, false);
[1]1048 PATCHGEN_EPILOG(pPatch, size);
1049 return VINF_SUCCESS;
1050}
1051
1052/**
1053 * Clear PATM interrupt flag
1054 *
1055 * @returns VBox status code.
[58122]1056 * @param pVM The cross context VM structure.
[1]1057 * @param pPatch Patch structure
1058 * @param pInstrGC Corresponding guest instruction
1059 *
1060 */
[9228]1061int patmPatchGenClearPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
[1]1062{
[54746]1063 PATCHGEN_PROLOG(pVM, pPatch, g_patmSetPIFRecord.cbFunction);
[1]1064
1065 /* Add lookup record for patch to guest address translation */
[44362]1066 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
[1]1067
[54746]1068 uint32_t size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmClearPIFRecord, 0, false);
[1]1069 PATCHGEN_EPILOG(pPatch, size);
1070 return VINF_SUCCESS;
1071}
1072
1073
/**
 * Clear PATM inhibit irq flag
 *
 * Chooses between two helper blobs based on the patch type: for duplicated
 * functions the "continue when IF=0" variant, otherwise the "fault when IF=0"
 * variant (see the record names).
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pPatch          Patch structure
 * @param   pNextInstrGC    Next guest instruction
 */
int patmPatchGenClearInhibitIRQ(PVM pVM, PPATCHINFO pPatch, RTRCPTR pNextInstrGC)
{
    PATMCALLINFO callInfo;
    PCPATCHASMRECORD pPatchAsmRec = pPatch->flags & PATMFL_DUPLICATE_FUNCTION
                                  ? &g_patmClearInhibitIRQContIF0Record : &g_patmClearInhibitIRQFaultIF0Record;
    PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);

    /* The jump-to-guest and duplicate-function flags are mutually exclusive here. */
    Assert((pPatch->flags & (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION)) != (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION));

    /* Add lookup record for patch to guest address translation */
    patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pNextInstrGC, PATM_LOOKUP_PATCH2GUEST);

    /* Pass the continuation address to the generated code. */
    callInfo.pNextInstrGC = pNextInstrGC;

    uint32_t size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false, &callInfo);

    PATCHGEN_EPILOG(pPatch, size);
    return VINF_SUCCESS;
}
1101
/**
 * Generate an interrupt handler entrypoint
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pPatch          Patch record
 * @param   pIntHandlerGC   IDT handler address
 *
 ** @todo must check if virtual IF is already cleared on entry!!!!!!!!!!!!!!!!!!!!!!!
 */
int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pIntHandlerGC)
{
    int rc = VINF_SUCCESS;

    if (!EMIsRawRing1Enabled(pVM)) /* direct passthru of interrupts is not allowed in the ring-1 support case as we can't
                                      deal with the ring-1/2 ambiguity in the patm asm code and we don't need it either as
                                      TRPMForwardTrap takes care of the details. */
    {
        uint32_t size;
        /* Pick the entry blob matching whether the interrupt pushes an error code. */
        PCPATCHASMRECORD pPatchAsmRec = pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE
                                      ? &g_patmIntEntryRecordErrorCode : &g_patmIntEntryRecord;
        PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);

        /* Add lookup record for patch to guest address translation */
        patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);

        /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
        size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);

        PATCHGEN_EPILOG(pPatch, size);
    }

    // Interrupt gates set IF to 0
    rc = patmPatchGenCli(pVM, pPatch);
    AssertRCReturn(rc, rc);

    return rc;
}
1140
1141/**
1142 * Generate a trap handler entrypoint
1143 *
1144 * @returns VBox status code.
[58122]1145 * @param pVM The cross context VM structure.
[1]1146 * @param pPatch Patch record
1147 * @param pTrapHandlerGC IDT handler address
1148 */
[9228]1149int patmPatchGenTrapEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTrapHandlerGC)
[1]1150{
1151 uint32_t size;
[54746]1152 PCPATCHASMRECORD pPatchAsmRec = (pPatch->flags & PATMFL_TRAPHANDLER_WITH_ERRORCODE)
1153 ? &g_patmTrapEntryRecordErrorCode : &g_patmTrapEntryRecord;
[1]1154
[45276]1155 Assert(!EMIsRawRing1Enabled(pVM));
1156
[54746]1157 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
[1]1158
1159 /* Add lookup record for patch to guest address translation */
[44362]1160 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pTrapHandlerGC, PATM_LOOKUP_PATCH2GUEST);
[1]1161
1162 /* Generate entrypoint for the trap handler (correcting CS in the interrupt stack frame) */
[54746]1163 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, pTrapHandlerGC, true);
[1]1164 PATCHGEN_EPILOG(pPatch, size);
1165
1166 return VINF_SUCCESS;
1167}
1168
1169#ifdef VBOX_WITH_STATISTICS
[9228]1170int patmPatchGenStats(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
[1]1171{
1172 uint32_t size;
1173
[54746]1174 PATCHGEN_PROLOG(pVM, pPatch, g_patmStatsRecord.cbFunction);
[1]1175
1176 /* Add lookup record for stats code -> guest handler. */
[44362]1177 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
[1]1178
1179 /* Generate code to keep calling statistics for this patch */
[54687]1180 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmStatsRecord, pInstrGC, false);
[1]1181 PATCHGEN_EPILOG(pPatch, size);
1182
1183 return VINF_SUCCESS;
1184}
1185#endif
1186
/**
 * Debug register moves to or from general purpose registers
 * mov GPR, DRx
 * mov DRx, GPR
 *
 * Rewrites the instruction into a mov to/from the shadowed DRx value in the
 * guest-mapped CPUMCTX (dr[] array), with an absolute-address relocation.
 *
 * @todo: if we ever want to support hardware debug registers natively, then
 * this will need to be changed!
 */
int patmPatchGenMovDebug(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
{
    int rc = VINF_SUCCESS;
    unsigned reg, mod, rm, dbgreg;
    uint32_t offset;

    PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);

    mod = 0;            //effective address (only)
    rm  = 5;            //disp32
    if (pCpu->pCurInstr->fParam1 == OP_PARM_Dd)
    {
        Assert(0);  // You not come here. Illegal!

        // mov DRx, GPR
        pPB[0] = 0x89;      //mov disp32, GPR
        Assert(pCpu->Param1.fUse & DISUSE_REG_DBG);
        Assert(pCpu->Param2.fUse & DISUSE_REG_GEN32);

        dbgreg = pCpu->Param1.Base.idxDbgReg;
        reg    = pCpu->Param2.Base.idxGenReg;
    }
    else
    {
        // mov GPR, DRx
        Assert(pCpu->Param1.fUse & DISUSE_REG_GEN32);
        Assert(pCpu->Param2.fUse & DISUSE_REG_DBG);

        pPB[0] = 0x8B;      // mov GPR, disp32
        reg    = pCpu->Param1.Base.idxGenReg;
        dbgreg = pCpu->Param2.Base.idxDbgReg;
    }

    pPB[1] = MAKE_MODRM(mod, reg, rm);

    /* Reject anything beyond DR7; index into the CPUMCTX dr[] array. */
    AssertReturn(dbgreg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
    offset = RT_UOFFSETOF_DYN(CPUMCTX, dr[dbgreg]);

    /* Patch in the guest-context address of the shadow register and register a fixup for it. */
    *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
    patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);

    PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
    return rc;
}
1239
/*
 * Control register moves to or from general purpose registers
 * mov GPR, CRx
 * mov CRx, GPR
 *
 * Rewrites the instruction into a mov to/from the shadowed CRx value in the
 * guest-mapped CPUMCTX, with an absolute-address relocation.
 */
int patmPatchGenMovControl(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
{
    int rc = VINF_SUCCESS;
    int reg, mod, rm, ctrlreg;
    uint32_t offset;

    PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);

    mod = 0;            //effective address (only)
    rm  = 5;            //disp32
    if (pCpu->pCurInstr->fParam1 == OP_PARM_Cd)
    {
        Assert(0);  // You not come here. Illegal!

        // mov CRx, GPR
        pPB[0] = 0x89;      //mov disp32, GPR
        ctrlreg = pCpu->Param1.Base.idxCtrlReg;
        reg     = pCpu->Param2.Base.idxGenReg;
        Assert(pCpu->Param1.fUse & DISUSE_REG_CR);
        Assert(pCpu->Param2.fUse & DISUSE_REG_GEN32);
    }
    else
    {
        // mov GPR, CRx
        Assert(pCpu->Param1.fUse & DISUSE_REG_GEN32);
        Assert(pCpu->Param2.fUse & DISUSE_REG_CR);

        pPB[0] = 0x8B;      // mov GPR, disp32
        reg     = pCpu->Param1.Base.idxGenReg;
        ctrlreg = pCpu->Param2.Base.idxCtrlReg;
    }

    pPB[1] = MAKE_MODRM(mod, reg, rm);

    /// @todo make this an array in the context structure
    switch (ctrlreg)
    {
        case DISCREG_CR0:
            offset = RT_OFFSETOF(CPUMCTX, cr0);
            break;
        case DISCREG_CR2:
            offset = RT_OFFSETOF(CPUMCTX, cr2);
            break;
        case DISCREG_CR3:
            offset = RT_OFFSETOF(CPUMCTX, cr3);
            break;
        case DISCREG_CR4:
            offset = RT_OFFSETOF(CPUMCTX, cr4);
            break;
        default: /* Shut up compiler warning. */
            AssertFailed();
            offset = 0;
            break;
    }
    /* Patch in the guest-context address of the shadow register and register a fixup for it. */
    *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
    patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);

    PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
    return rc;
}
1305
/*
 * mov GPR, SS
 *
 * Emitted sequence: clear PIF, push ss, run the helper that checks/corrects
 * the RPL of the pushed selector, pop it into the destination GPR, set PIF.
 */
int patmPatchGenMovFromSS(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    uint32_t size, offset;

    Log(("patmPatchGenMovFromSS %RRv\n", pCurInstrGC)); RT_NOREF_PV(pCurInstrGC);

    Assert(pPatch->flags & PATMFL_CODE32);

    /* Reserve space for the whole five-part sequence up front. */
    PATCHGEN_PROLOG(pVM, pPatch, g_patmClearPIFRecord.cbFunction + 2 + g_patmMovFromSSRecord.cbFunction + 2 + g_patmSetPIFRecord.cbFunction);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmClearPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* push ss */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, 2);
    offset = 0;
    if (pCpu->fPrefix & DISPREFIX_OPSIZE)
        pPB[offset++] = 0x66;       /* size override -> 16 bits push */
    pPB[offset++] = 0x16;
    PATCHGEN_EPILOG(pPatch, offset);

    /* checks and corrects RPL of pushed ss*/
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmMovFromSSRecord.cbFunction);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmMovFromSSRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    /* pop general purpose register */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, 2);
    offset = 0;
    if (pCpu->fPrefix & DISPREFIX_OPSIZE)
        pPB[offset++] = 0x66;       /* size override -> 16 bits pop */
    pPB[offset++] = 0x58 + pCpu->Param1.Base.idxGenReg;     /* pop r32 (0x58 + reg index) */
    PATCHGEN_EPILOG(pPatch, offset);


    /* Re-enable PIF now that the fragile part is done. */
    PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmSetPIFRecord.cbFunction);
    size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmSetPIFRecord, 0, false);
    PATCHGEN_EPILOG(pPatch, size);

    return VINF_SUCCESS;
}
1349
1350
/**
 * Generate an sldt or str patch instruction
 *
 * Replaces sldt/str by reading the shadowed ldtr/tr selector from the
 * guest-mapped CPUMCTX instead of the real hardware registers.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenSldtStr(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    // sldt %Ew
    int rc = VINF_SUCCESS;
    uint32_t offset = 0;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->fPrefix == DISPREFIX_NONE || pCpu->fPrefix == DISPREFIX_OPSIZE);

    PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);

    if (pCpu->Param1.fUse == DISUSE_REG_GEN32 || pCpu->Param1.fUse == DISUSE_REG_GEN16)
    {
        /* Register operand */
        // 8B 15 [32 bits addr]   mov edx, CPUMCTX.tr/ldtr

        if (pCpu->fPrefix == DISPREFIX_OPSIZE)
            pPB[offset++] = 0x66;

        pPB[offset++] = 0x8B;              // mov       destreg, CPUMCTX.tr/ldtr
        /* Modify REG part according to destination of original instruction */
        pPB[offset++] = MAKE_MODRM(0, pCpu->Param1.Base.idxGenReg, 5);
        if (pCpu->pCurInstr->uOpcode == OP_STR)
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTRCPTR);
    }
    else
    {
        /* Memory operand */
        //50 push        eax
        //52 push        edx
        //8D 15 48 7C 42 00 lea         edx, dword ptr [dest]
        //66 A1 48 7C 42 00 mov         ax, CPUMCTX.tr/ldtr
        //66 89 02 mov         word ptr [edx],ax
        //5A pop         edx
        //58 pop         eax

        pPB[offset++] = 0x50;              // push      eax
        pPB[offset++] = 0x52;              // push      edx

        if (pCpu->fPrefix == DISPREFIX_SEG)
        {
            pPB[offset++] = DISQuerySegPrefixByte(pCpu);
        }
        pPB[offset++] = 0x8D;              // lea       edx, dword ptr [dest]
        // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
        pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, DISGREG_EDX , pCpu->ModRM.Bits.Rm);

        i = 3;  /* standard offset of modrm bytes */
        if (pCpu->fPrefix == DISPREFIX_OPSIZE)
            i++;    //skip operand prefix
        if (pCpu->fPrefix == DISPREFIX_SEG)
            i++;    //skip segment prefix

        /* Copy the remaining operand bytes (SIB/displacement) from the guest instruction. */
        rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
        AssertRCReturn(rc, rc);
        offset += (pCpu->cbInstr - i);

        pPB[offset++] = 0x66;              // mov       ax, CPUMCTX.tr/ldtr
        pPB[offset++] = 0xA1;
        if (pCpu->pCurInstr->uOpcode == OP_STR)
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
        }
        else
        {
            *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
        }
        patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
        offset += sizeof(RTRCPTR);

        pPB[offset++] = 0x66;              // mov       word ptr [edx],ax
        pPB[offset++] = 0x89;
        pPB[offset++] = 0x02;

        pPB[offset++] = 0x5A;              // pop       edx
        pPB[offset++] = 0x58;              // pop       eax
    }

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}
1451
/**
 * Generate an sgdt or sidt patch instruction
 *
 * Replaces sgdt/sidt by storing the shadowed GDTR/IDTR limit and base from
 * the guest-mapped CPUMCTX into the instruction's memory operand.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pPatch      Patch record
 * @param   pCpu        Disassembly state
 * @param   pCurInstrGC Guest instruction address
 */
int patmPatchGenSxDT(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
{
    int rc = VINF_SUCCESS;
    uint32_t offset = 0, offset_base, offset_limit;
    uint32_t i;

    /** @todo segment prefix (untested) */
    Assert(pCpu->fPrefix == DISPREFIX_NONE);

    // sgdt %Ms
    // sidt %Ms

    /* Select the CPUMCTX field offsets for the descriptor table being stored. */
    switch (pCpu->pCurInstr->uOpcode)
    {
        case OP_SGDT:
            offset_base  = RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
            offset_limit = RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
            break;

        case OP_SIDT:
            offset_base  = RT_OFFSETOF(CPUMCTX, idtr.pIdt);
            offset_limit = RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
            break;

        default:
            return VERR_INVALID_PARAMETER;
    }

//50 push        eax
//52 push        edx
//8D 15 48 7C 42 00 lea         edx, dword ptr [dest]
//66 A1 48 7C 42 00 mov         ax, CPUMCTX.gdtr.limit
//66 89 02 mov         word ptr [edx],ax
//A1 48 7C 42 00 mov         eax, CPUMCTX.gdtr.base
//89 42 02 mov         dword ptr [edx+2],eax
//5A pop         edx
//58 pop         eax

    PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);
    pPB[offset++] = 0x50;              // push      eax
    pPB[offset++] = 0x52;              // push      edx

    if (pCpu->fPrefix == DISPREFIX_SEG)
    {
        pPB[offset++] = DISQuerySegPrefixByte(pCpu);
    }
    pPB[offset++] = 0x8D;              // lea       edx, dword ptr [dest]
    // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
    pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, DISGREG_EDX , pCpu->ModRM.Bits.Rm);

    i = 3;  /* standard offset of modrm bytes */
    if (pCpu->fPrefix == DISPREFIX_OPSIZE)
        i++;    //skip operand prefix
    if (pCpu->fPrefix == DISPREFIX_SEG)
        i++;    //skip segment prefix
    /* Copy the remaining operand bytes (SIB/displacement) from the guest instruction. */
    rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
    AssertRCReturn(rc, rc);
    offset += (pCpu->cbInstr - i);

    pPB[offset++] = 0x66;              // mov       ax, CPUMCTX.gdtr.limit
    pPB[offset++] = 0xA1;
    *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_limit;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTRCPTR);

    pPB[offset++] = 0x66;              // mov       word ptr [edx],ax
    pPB[offset++] = 0x89;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0xA1;              // mov       eax, CPUMCTX.gdtr.base
    *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_base;
    patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
    offset += sizeof(RTRCPTR);

    pPB[offset++] = 0x89;              // mov       dword ptr [edx+2],eax
    pPB[offset++] = 0x42;
    pPB[offset++] = 0x02;

    pPB[offset++] = 0x5A;              // pop       edx
    pPB[offset++] = 0x58;              // pop       eax

    PATCHGEN_EPILOG(pPatch, offset);

    return rc;
}
1546
1547/**
1548 * Generate a cpuid patch instruction
1549 *
1550 * @returns VBox status code.
[58122]1551 * @param pVM The cross context VM structure.
[1]1552 * @param pPatch Patch record
1553 * @param pCurInstrGC Guest instruction address
1554 */
[9228]1555int patmPatchGenCpuid(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
[1]1556{
1557 uint32_t size;
[54746]1558 PATCHGEN_PROLOG(pVM, pPatch, g_patmCpuidRecord.cbFunction);
[1]1559
[54687]1560 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmCpuidRecord, 0, false);
[1]1561
1562 PATCHGEN_EPILOG(pPatch, size);
[39078]1563 NOREF(pCurInstrGC);
[1]1564 return VINF_SUCCESS;
1565}
1566
1567/**
1568 * Generate the jump from guest to patch code
1569 *
1570 * @returns VBox status code.
[58126]1571 * @param pVM The cross context VM structure.
1572 * @param pPatch Patch record
1573 * @param pReturnAddrGC Guest code target of the jump.
[1]1574 * @param fClearInhibitIRQs Clear inhibit irq flag
1575 */
[9212]1576int patmPatchGenJumpToGuest(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fClearInhibitIRQs)
[1]1577{
1578 int rc = VINF_SUCCESS;
1579 uint32_t size;
1580
1581 if (fClearInhibitIRQs)
1582 {
1583 rc = patmPatchGenClearInhibitIRQ(pVM, pPatch, pReturnAddrGC);
1584 if (rc == VERR_NO_MEMORY)
1585 return rc;
1586 AssertRCReturn(rc, rc);
1587 }
1588
[54746]1589 PATCHGEN_PROLOG(pVM, pPatch, PATMJumpToGuest_IF1Record.cbFunction);
[1]1590
1591 /* Add lookup record for patch to guest address translation */
[44362]1592 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
[1]1593
1594 /* Generate code to jump to guest code if IF=1, else fault. */
1595 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpToGuest_IF1Record, pReturnAddrGC, true);
1596 PATCHGEN_EPILOG(pPatch, size);
1597
1598 return rc;
1599}
1600
/*
 * Relative jump from patch code to patch code (no fixup required)
 *
 * Emits a 5-byte near jmp whose displacement is computed directly from the
 * current patch offset; since both ends live in patch memory, no relocation
 * record is needed.
 */
int patmPatchGenPatchJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RCPTRTYPE(uint8_t *) pPatchAddrGC, bool fAddLookupRecord)
{
    int32_t displ;
    int rc = VINF_SUCCESS;

    Assert(PATMIsPatchGCAddr(pVM, pPatchAddrGC));
    PATCHGEN_PROLOG(pVM, pPatch, SIZEOF_NEARJUMP32);

    if (fAddLookupRecord)
    {
        /* Add lookup record for patch to guest address translation */
        patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
    }

    pPB[0] = 0xE9;  //JMP

    /* Displacement is relative to the end of the 5-byte jmp instruction. */
    displ = pPatchAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + SIZEOF_NEARJUMP32);

    *(uint32_t *)&pPB[1] = displ;

    PATCHGEN_EPILOG(pPatch, SIZEOF_NEARJUMP32);

    return rc;
}
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use