VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@76553

Last change on this file was revision 76553, checked in by vboxsync, 5 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 264.0 KB
RevLine 
[23]1/* $Id: PATM.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
[1]2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
[44362]5 * @note Never ever reuse patch memory!!
[1]6 */
7
8/*
[76553]9 * Copyright (C) 2006-2019 Oracle Corporation
[1]10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
[5999]14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
[1]18 */
19
[58396]20/** @page pg_patm PATM - Patch Manager
21 *
22 * The patch manager (PATM) patches privileged guest code to allow it to execute
23 * directly in raw-mode.
24 *
[58396]25 * The PATM works closely together with @ref pg_csam "CSAM" to detect code
26 * needing patching and to detect changes to existing patches. It also interfaces with
27 * other components, like @ref pg_trpm "TRPM" and @ref pg_rem "REM", for these
28 * purposes.
29 *
30 * @sa @ref grp_patm
31 */
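/*
 * Illustrative only (not part of the build): a privileged guest instruction
 * found by CSAM typically ends up in PATM through a call like the sketch
 * below. Both functions and the PATMFL_CODE32 flag are used further down in
 * this file; the surrounding logic here is just an example.
 *
 * @code
 *    if (!PATMR3HasBeenPatched(pVM, pInstrGC))
 *        rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32);
 * @endcode
 */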
[57358]32
[62460]33
[57358]34/*********************************************************************************************************************************
35* Header Files *
36*********************************************************************************************************************************/
[1]37#define LOG_GROUP LOG_GROUP_PATM
[35346]38#include <VBox/vmm/patm.h>
39#include <VBox/vmm/stam.h>
[54763]40#include <VBox/vmm/pdmapi.h>
[35346]41#include <VBox/vmm/pgm.h>
42#include <VBox/vmm/cpum.h>
43#include <VBox/vmm/cpumdis.h>
44#include <VBox/vmm/iom.h>
45#include <VBox/vmm/mm.h>
46#include <VBox/vmm/em.h>
[45620]47#include <VBox/vmm/hm.h>
[35346]48#include <VBox/vmm/ssm.h>
49#include <VBox/vmm/trpm.h>
50#include <VBox/vmm/cfgm.h>
[1]51#include <VBox/param.h>
[35346]52#include <VBox/vmm/selm.h>
[44362]53#include <VBox/vmm/csam.h>
[1]54#include <iprt/avl.h>
55#include "PATMInternal.h"
56#include "PATMPatch.h"
[35346]57#include <VBox/vmm/vm.h>
[44362]58#include <VBox/vmm/uvm.h>
[1]59#include <VBox/dbg.h>
60#include <VBox/err.h>
61#include <VBox/log.h>
62#include <iprt/assert.h>
63#include <iprt/asm.h>
64#include <VBox/dis.h>
65#include <VBox/disopcode.h>
66
67#include <iprt/string.h>
68#include "PATMA.h"
69
70//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
71//#define PATM_DISABLE_ALL
72
[36669]73/**
74 * Refresh trampoline patch state.
75 */
76typedef struct PATMREFRESHPATCH
77{
78 /** Pointer to the VM structure. */
79 PVM pVM;
80 /** The trampoline patch record. */
81 PPATCHINFO pPatchTrampoline;
82 /** The new patch we want to jump to. */
83 PPATCHINFO pPatchRec;
84} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
85
86
[41671]87#define PATMREAD_RAWCODE 1 /* read code as-is */
88#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
89#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
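/* Note: as used below, PATMREAD_ORGCODE makes patmReadBytes() fetch the original
 * guest bytes via PATMR3ReadOrgInstr(), PATMREAD_RAWCODE reads whatever is mapped
 * right now (including patch jumps, see PATM_LOG_RAW_PATCH_INSTR), and
 * PATMREAD_NOCHECK only suppresses the strict-build patch conflict assertions. */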
90
91/*
92 * Private structure used during disassembly
93 */
94typedef struct
95{
96 PVM pVM;
97 PPATCHINFO pPatchInfo;
[41760]98 R3PTRTYPE(uint8_t *) pbInstrHC;
[41671]99 RTRCPTR pInstrGC;
100 uint32_t fReadFlags;
101} PATMDISASM, *PPATMDISASM;
102
103
[57358]104/*********************************************************************************************************************************
105* Internal Functions *
106*********************************************************************************************************************************/
[9228]107static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
[1]108static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
109static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
110
[4772]111#ifdef LOG_ENABLED // keep gcc quiet
[9228]112static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
[1]113#endif
114#ifdef VBOX_WITH_STATISTICS
115static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
116static void patmResetStat(PVM pVM, void *pvSample);
117static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
118#endif
119
120#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
121#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
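/*
 * Minimal usage sketch for the two conversion macros above: they translate
 * between the ring-3 (HC) and raw-mode (GC) mappings of the patch memory
 * block, so converting back and forth yields the original pointer.
 *
 * @code
 *    RTRCPTR  pInstrGC  = patmPatchHCPtr2PatchGCPtr(pVM, pInstrHC);
 *    uint8_t *pInstrHC2 = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
 *    Assert(pInstrHC2 == pInstrHC);
 * @endcode
 */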
122
123static int patmReinit(PVM pVM);
[54763]124static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
[62651]125#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
[44362]126static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
[62651]127#endif
[44362]128static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
[1]129
130#ifdef VBOX_WITH_DEBUGGER
[9228]131static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
[44399]132static FNDBGCCMD patmr3CmdOn;
133static FNDBGCCMD patmr3CmdOff;
[1]134
135/** Command descriptors. */
136static const DBGCCMD g_aCmds[] =
137{
[35696]138 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
139 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
140 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
[1]141};
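/* These commands are registered with the debugger console below via
 * DBGCRegisterCommands(); typing "patmon" or "patmoff" at the DBGC prompt
 * toggles patching at runtime. */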
142#endif
143
[12532]144/* Don't want to break saved states, so put it here as a global variable. */
[12534]145static unsigned int cIDTHandlersDisabled = 0;
[12532]146
[1]147/**
148 * Initializes the PATM.
149 *
150 * @returns VBox status code.
[58122]151 * @param pVM The cross context VM structure.
[1]152 */
[44362]153VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
[1]154{
155 int rc;
156
[45620]157 /*
158 * We only need a saved state dummy loader if HM is enabled.
159 */
[70948]160 if (!VM_IS_RAW_MODE_ENABLED(pVM))
[45620]161 {
162 pVM->fPATMEnabled = false;
[50575]163 return SSMR3RegisterStub(pVM, "PATM", 0);
[45620]164 }
165
166 /*
167 * Raw-mode.
168 */
[1]169 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
170
[19141]171 /* These values can't change as they are hardcoded in patch code (old saved states!) */
[19660]172 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
[19141]173 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
174 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
175 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
[1]176
[54687]177 AssertReleaseMsg(g_fPatmInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
178 ("Interrupt flags out of sync!! g_fPatmInterruptFlag=%#x expected %#x. broken assembler?\n", g_fPatmInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
[19141]179
[1]180 /* Allocate patch memory and GC patch state memory. */
181 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
182 /* Add another page in case the generated code is much larger than expected. */
183 /** @todo bad safety precaution */
184 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
[13833]185 if (RT_FAILURE(rc))
[1]186 {
[20011]187 Log(("MMHyperAlloc failed with %Rrc\n", rc));
[1]188 return rc;
189 }
[13827]190 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
[1]191
[1]192 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address.) */
[9228]193 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
[13827]194 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
[1]195
[46150]196 patmR3DbgInit(pVM);
197
[1]198 /*
199 * Hypervisor memory for GC status data (read/write)
200 *
201 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
202 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
203 *
204 */
[31437]205 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
[1]206 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
[13827]207 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
[1]208
209 /* Hypervisor memory for patch statistics */
210 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
[13827]211 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
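 /*
  * Rough layout of the single MMR3HyperAllocOnceNoRel() block set up above
  * (the offsets follow directly from the pointer arithmetic in this function):
  *
  *    +0                                      patch code (PATCH_MEMORY_SIZE)
  *    +PATCH_MEMORY_SIZE                      spare page (safety slack)
  *    +PATCH_MEMORY_SIZE + PAGE_SIZE          PATM stack (PATM_STACK_TOTAL_SIZE)
  *    +... + PATM_STACK_TOTAL_SIZE            GC state, PATMGCSTATE (one page)
  *    +... + PAGE_SIZE                        statistics (PATM_STAT_MEMSIZE)
  */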
[1]212
213 /* Memory for patch lookup trees. */
214 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
215 AssertRCReturn(rc, rc);
[13827]216 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
[1]217
[3696]218#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
[453]219 /* Check CFGM option. */
220 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
[13833]221 if (RT_FAILURE(rc))
[453]222# ifdef PATM_DISABLE_ALL
223 pVM->fPATMEnabled = false;
224# else
225 pVM->fPATMEnabled = true;
[3696]226# endif
[453]227#endif
228
[1]229 rc = patmReinit(pVM);
230 AssertRC(rc);
[13833]231 if (RT_FAILURE(rc))
[1]232 return rc;
233
234 /*
[55895]235 * Register the virtual page access handler type.
236 */
237 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_ALL, false /*fRelocUserRC*/,
238 NULL /*pfnInvalidateR3*/,
[55937]239 patmVirtPageHandler,
[56052]240 "patmVirtPageHandler", "patmRCVirtPagePfHandler",
[55895]241 "PATMMonitorPatchJump", &pVM->patm.s.hMonitorPageType);
242 AssertRCReturn(rc, rc);
243
244 /*
[33540]245 * Register save and load state notifiers.
[1]246 */
[52771]247 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SAVED_STATE_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
[22480]248 NULL, NULL, NULL,
249 NULL, patmR3Save, NULL,
250 NULL, patmR3Load, NULL);
251 AssertRCReturn(rc, rc);
[1]252
253#ifdef VBOX_WITH_DEBUGGER
254 /*
255 * Debugger commands.
256 */
[25777]257 static bool s_fRegisteredCmds = false;
258 if (!s_fRegisteredCmds)
[1]259 {
[25777]260 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
261 if (RT_SUCCESS(rc2))
262 s_fRegisteredCmds = true;
[1]263 }
264#endif
265
266#ifdef VBOX_WITH_STATISTICS
267 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
268 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
269 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
270 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
271 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
272 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
273 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
274 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
275
276 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
277 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
[23]278
[1]279 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
280 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
281 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
[23]282
[1]283 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
284 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
285 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
286 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
287 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
288
289 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
290 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
291
292 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
293 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
294
295 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
296 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
297 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
298
299 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
300 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
301 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
302
303 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
304 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
305
306 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
307 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
308 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
[23]309 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
[1]310
[2030]311 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
312 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
313
[1]314 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
315 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
316
317 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
318 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
319 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
320
321 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
322 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
[1125]323 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
324 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
[1]325
326 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
327 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
328 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
329 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
330 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
331
[23]332 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
[1]333#endif /* VBOX_WITH_STATISTICS */
334
[54687]335 Log(("g_patmCallRecord.cbFunction %u\n", g_patmCallRecord.cbFunction));
336 Log(("g_patmCallIndirectRecord.cbFunction %u\n", g_patmCallIndirectRecord.cbFunction));
337 Log(("g_patmRetRecord.cbFunction %u\n", g_patmRetRecord.cbFunction));
338 Log(("g_patmJumpIndirectRecord.cbFunction %u\n", g_patmJumpIndirectRecord.cbFunction));
339 Log(("g_patmPopf32Record.cbFunction %u\n", g_patmPopf32Record.cbFunction));
340 Log(("g_patmIretRecord.cbFunction %u\n", g_patmIretRecord.cbFunction));
341 Log(("g_patmStiRecord.cbFunction %u\n", g_patmStiRecord.cbFunction));
342 Log(("g_patmCheckIFRecord.cbFunction %u\n", g_patmCheckIFRecord.cbFunction));
[23]343
[1]344 return rc;
345}
346
347/**
348 * Finalizes HMA page attributes.
349 *
350 * @returns VBox status code.
[58122]351 * @param pVM The cross context VM structure.
[1]352 */
[44362]353VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
[1]354{
[70948]355 if (!VM_IS_RAW_MODE_ENABLED(pVM))
[45620]356 return VINF_SUCCESS;
357
[54763]358 /*
359 * The GC state, stack and statistics must be read/write for the guest
360 * (supervisor only of course).
361 *
362 * Remember, we run guest code at ring-1 and ring-2 levels, which are
363 * considered supervisor levels by the paging structures. We run the VMM
364 * in ring-0 with CR0.WP=0 and mapping all VMM structures as read-only
365 * pages. The following structures are exceptions and must be mapped with
366 * write access so the ring-1 and ring-2 code can modify them.
367 */
[1]368 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
[54763]369 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCState accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
[1]370
371 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
[54763]372 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCStack accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
[1]373
374 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
[54763]375 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the stats struct accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
[1]376
[54763]377 /*
378 * Find the patch helper segment so we can identify code running there as patch code.
379 */
380 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpBegin", &pVM->patm.s.pbPatchHelpersRC);
381 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpBegin: %Rrc\n", rc), rc);
382 pVM->patm.s.pbPatchHelpersR3 = (uint8_t *)MMHyperRCToR3(pVM, pVM->patm.s.pbPatchHelpersRC);
383 AssertLogRelReturn(pVM->patm.s.pbPatchHelpersR3 != NULL, VERR_INTERNAL_ERROR_3);
384
385 RTRCPTR RCPtrEnd;
386 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpEnd", &RCPtrEnd);
387 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpEnd: %Rrc\n", rc), rc);
388
389 pVM->patm.s.cbPatchHelpers = RCPtrEnd - pVM->patm.s.pbPatchHelpersRC;
390 AssertLogRelMsgReturn(pVM->patm.s.cbPatchHelpers < _128K,
391 ("%RRv-%RRv => %#x\n", pVM->patm.s.pbPatchHelpersRC, RCPtrEnd, pVM->patm.s.cbPatchHelpers),
392 VERR_INTERNAL_ERROR_4);
[55889]393
394
395 return VINF_SUCCESS;
[1]396}
397
398/**
399 * (Re)initializes PATM
400 *
[58122]401 * @param pVM The cross context VM structure.
[1]402 */
403static int patmReinit(PVM pVM)
404{
405 int rc;
406
407 /*
408 * Assert alignment and sizes.
409 */
[73097]410 AssertRelease(!(RT_UOFFSETOF(VM, patm.s) & 31));
[1]411 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
412
413 /*
414 * Setup any fixed pointers and offsets.
415 */
[73097]416 pVM->patm.s.offVM = RT_UOFFSETOF(VM, patm);
[1]417
[3696]418#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
[1]419#ifndef PATM_DISABLE_ALL
420 pVM->fPATMEnabled = true;
421#endif
[453]422#endif
[1]423
424 Assert(pVM->patm.s.pGCStateHC);
425 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
426 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
[11311]427
[13834]428 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
[1]429 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
430
431 Assert(pVM->patm.s.pGCStackHC);
432 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
433 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
434 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
435 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
436
437 Assert(pVM->patm.s.pStatsHC);
438 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
439 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
440
441 Assert(pVM->patm.s.pPatchMemHC);
[26039]442 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
[1]443 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
444 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
445
446 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
[19240]447 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
[1]448
449 Assert(pVM->patm.s.PatchLookupTreeHC);
[13827]450 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
[1]451
452 /*
453 * (Re)Initialize PATM structure
454 */
455 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
456 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
457 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
458 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
459 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
460 pVM->patm.s.pvFaultMonitor = 0;
461 pVM->patm.s.deltaReloc = 0;
462
463 /* Lowest and highest patched instruction */
[62648]464 pVM->patm.s.pPatchedInstrGCLowest = RTRCPTR_MAX;
[1]465 pVM->patm.s.pPatchedInstrGCHighest = 0;
466
467 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
468 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
469 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
470
471 pVM->patm.s.pfnSysEnterPatchGC = 0;
472 pVM->patm.s.pfnSysEnterGC = 0;
473
474 pVM->patm.s.fOutOfMemory = false;
475
476 pVM->patm.s.pfnHelperCallGC = 0;
[46137]477 patmR3DbgReset(pVM);
[1]478
479 /* Generate all global functions to be used by future patches. */
480 /* We generate a fake patch in order to use the existing code for relocation. */
481 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
[13833]482 if (RT_FAILURE(rc))
[1]483 {
484 Log(("Out of memory!!!!\n"));
485 return VERR_NO_MEMORY;
486 }
487 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
488 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
489 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
490
491 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
492 AssertRC(rc);
493
494 /* Update free pointer in patch memory. */
495 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
496 /* Round to next 8 byte boundary. */
497 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
[46135]498
499
[1]500 return rc;
501}
502
503
504/**
505 * Applies relocations to data and code managed by this
506 * component. This function will be called at init and
507 * whenever the VMM needs to relocate itself inside the GC.
508 *
509 * The PATM will update the addresses used by the switcher.
510 *
[58122]511 * @param pVM The cross context VM structure.
[54763]512 * @param offDelta The relocation delta.
[1]513 */
[54763]514VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM, RTRCINTPTR offDelta)
[1]515{
[70948]516 if (!VM_IS_RAW_MODE_ENABLED(pVM))
[45620]517 return;
518
[13827]519 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
[54763]520 Assert((RTRCINTPTR)(GCPtrNew - pVM->patm.s.pGCStateGC) == offDelta);
[1]521
[54763]522 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, offDelta));
523 if (offDelta)
[1]524 {
525 PCPUMCTX pCtx;
526
527 /* Update CPUMCTX guest context pointer. */
[54763]528 pVM->patm.s.pCPUMCtxGC += offDelta;
[1]529
[54763]530 pVM->patm.s.deltaReloc = offDelta;
531 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmR3RelocatePatches, (void *)pVM);
[1]532
[54763]533 pVM->patm.s.pGCStateGC = GCPtrNew;
534 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
535 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
536 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
537 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
[1]538
[54763]539 if (pVM->patm.s.pfnSysEnterPatchGC)
540 pVM->patm.s.pfnSysEnterPatchGC += offDelta;
[1]541
542 /* If we are running patch code right now, then also adjust EIP. */
[54763]543 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
[1]544 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
[54763]545 pCtx->eip += offDelta;
[1]546
[54763]547 /* Deal with the global patch functions. */
548 pVM->patm.s.pfnHelperCallGC += offDelta;
549 pVM->patm.s.pfnHelperRetGC += offDelta;
550 pVM->patm.s.pfnHelperIretGC += offDelta;
551 pVM->patm.s.pfnHelperJumpGC += offDelta;
[1]552
[54763]553 pVM->patm.s.pbPatchHelpersRC += offDelta;
[1]554
[54763]555 patmR3RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
[1]556 }
557}
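/*
 * Relocation in a nutshell (the numbers are made up for illustration): if the
 * hypervisor area moves so that pGCStateGC goes from 0xa0401000 to 0xa0405000,
 * offDelta is 0x4000 and every raw-mode pointer PATM keeps (patch memory,
 * stack, statistics, helpers, the per-patch fixups walked by
 * patmR3RelocatePatches() and, when patch code is currently executing, the
 * guest EIP) is adjusted by that same amount in the function above.
 */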
558
559
560/**
561 * Terminates the PATM.
562 *
563 * Termination means cleaning up and freeing all resources;
564 * the VM itself is at this point powered off or suspended.
565 *
566 * @returns VBox status code.
[58122]567 * @param pVM The cross context VM structure.
[1]568 */
[44362]569VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
[1]570{
[70948]571 if (!VM_IS_RAW_MODE_ENABLED(pVM))
[45620]572 return VINF_SUCCESS;
573
[46137]574 patmR3DbgTerm(pVM);
[46135]575
[1]576 /* Memory was all allocated from the two MM heaps and requires no freeing. */
577 return VINF_SUCCESS;
578}
579
580
581/**
582 * PATM reset callback.
583 *
584 * @returns VBox status code.
[58122]585 * @param pVM The cross context VM structure.
[1]586 */
[44362]587VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
[1]588{
589 Log(("PATMR3Reset\n"));
[70948]590 if (!VM_IS_RAW_MODE_ENABLED(pVM))
[45620]591 return VINF_SUCCESS;
[1]592
593 /* Free all patches. */
[45620]594 for (;;)
[1]595 {
[9228]596 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
[1]597 if (pPatchRec)
[44362]598 patmR3RemovePatch(pVM, pPatchRec, true);
[1]599 else
600 break;
601 }
602 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
603 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
604 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
605 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
606
607 int rc = patmReinit(pVM);
[13833]608 if (RT_SUCCESS(rc))
[1]609 rc = PATMR3InitFinalize(pVM); /* paranoia */
610
611 return rc;
612}
613
[41760]614/**
615 * @callback_method_impl{FNDISREADBYTES}
616 */
617static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
[1]618{
[41760]619 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
[1]620
[41760]621/** @todo change this to read more! */
[1]622 /*
623 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
624 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
625 */
626 /** @todo could change in the future! */
627 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
628 {
[41770]629 size_t cbRead = cbMaxRead;
630 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
631 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
632 if (RT_SUCCESS(rc))
[1]633 {
[41770]634 if (cbRead >= cbMinRead)
[41760]635 {
[41771]636 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
[41760]637 return VINF_SUCCESS;
638 }
[41770]639
[41771]640 cbMinRead -= (uint8_t)cbRead;
641 cbMaxRead -= (uint8_t)cbRead;
642 offInstr += (uint8_t)cbRead;
[41770]643 uSrcAddr += cbRead;
[1]644 }
[41760]645
[1]646#ifdef VBOX_STRICT
[41760]647 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
648 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
[1]649 {
[41760]650 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
651 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
[1]652 }
653#endif
654 }
655
[41760]656 int rc = VINF_SUCCESS;
657 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
658 if ( !pDisInfo->pbInstrHC
659 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
660 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
[1]661 {
[41658]662 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
[41760]663 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
664 offInstr += cbMinRead;
[1]665 }
[41760]666 else
667 {
[41770]668 /*
669 * pbInstrHC is the base address; adjust according to the GC pointer.
670 *
671 * Try read the max number of bytes here. Since the disassembler only
672 * ever uses these bytes for the current instruction, it doesn't matter
673 * much if we accidentally read the start of the next instruction even
674 * if it happens to be a patch jump or int3.
675 */
[41760]676 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
677 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
[30572]678
[41770]679 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
680 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
681 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
682 if (cbToRead > cbMaxRead)
683 cbToRead = cbMaxRead;
684
685 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
686 offInstr += (uint8_t)cbToRead;
[41760]687 }
[1]688
[41760]689 pDis->cbCachedInstr = offInstr;
690 return rc;
[1]691}
692
[41671]693
694DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
695 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
696{
697 PATMDISASM disinfo;
698 disinfo.pVM = pVM;
699 disinfo.pPatchInfo = pPatch;
[41760]700 disinfo.pbInstrHC = pbInstrHC;
[41671]701 disinfo.pInstrGC = InstrGCPtr32;
702 disinfo.fReadFlags = fReadFlags;
703 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
[41675]704 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
[41671]705 patmReadBytes, &disinfo,
706 pCpu, pcbInstr, pszOutput, cbOutput));
707}
708
709
710DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
711 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
712{
713 PATMDISASM disinfo;
714 disinfo.pVM = pVM;
715 disinfo.pPatchInfo = pPatch;
[41760]716 disinfo.pbInstrHC = pbInstrHC;
[41671]717 disinfo.pInstrGC = InstrGCPtr32;
718 disinfo.fReadFlags = fReadFlags;
[41674]719 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
[41675]720 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
[41674]721 patmReadBytes, &disinfo,
722 pCpu, pcbInstr));
[41671]723}
724
725
726DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
727 uint32_t fReadFlags,
728 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
729{
730 PATMDISASM disinfo;
731 disinfo.pVM = pVM;
732 disinfo.pPatchInfo = pPatch;
[41760]733 disinfo.pbInstrHC = pbInstrHC;
[41671]734 disinfo.pInstrGC = InstrGCPtr32;
735 disinfo.fReadFlags = fReadFlags;
[41674]736 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
737 pCpu, pcbInstr));
[41671]738}
739
740#ifdef LOG_ENABLED
741# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
742 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
743# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
744 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
745
746# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
747 do { \
748 if (LogIsEnabled()) \
749 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
750 } while (0)
751
752static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
753 const char *pszComment1, const char *pszComment2)
754{
755 DISCPUSTATE DisState;
756 char szOutput[128];
757 szOutput[0] = '\0';
758 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
759 &DisState, NULL, szOutput, sizeof(szOutput));
760 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
761}
762
763#else
764# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
765# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
766# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
767#endif
768
769
[1]770/**
[9228]771 * Callback function for RTAvloU32DoWithAll
[1]772 *
773 * Updates all fixups in the patches
774 *
775 * @returns VBox status code.
776 * @param pNode Current node
[41801]777 * @param pParam Pointer to the VM.
[1]778 */
[54763]779static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
[1]780{
781 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
782 PVM pVM = (PVM)pParam;
[9228]783 RTRCINTPTR delta;
[1]784 int rc;
785
786 /* Nothing to do if the patch is not active. */
787 if (pPatch->patch.uState == PATCH_REFUSED)
788 return 0;
789
790 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
[41671]791 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
[1]792
793 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
[9228]794 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
[1]795
796 /*
[54714]797 * Apply fixups.
[1]798 */
[54714]799 AVLPVKEY key = NULL;
800 for (;;)
[1]801 {
[54714]802 /* Get the record that's closest from above (after or equal to key). */
803 PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
804 if (!pRec)
[1]805 break;
806
[54714]807 key = (uint8_t *)pRec->Core.Key + 1; /* search for the next record during the next round. */
[1]808
809 switch (pRec->uType)
810 {
[54714]811 case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
[54764]812 Assert(pRec->pDest == pRec->pSource); Assert(PATM_IS_ASMFIX(pRec->pSource));
[54714]813 Log(("Absolute patch template fixup type %#x at %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
814 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
815 break;
816
[1]817 case FIXUP_ABSOLUTE:
[13834]818 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
[31064]819 if ( !pRec->pSource
[30572]820 || PATMIsPatchGCAddr(pVM, pRec->pSource))
[1]821 {
[9228]822 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
[1]823 }
824 else
825 {
826 uint8_t curInstr[15];
827 uint8_t oldInstr[15];
828 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
829
830 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
831
832 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
[9228]833 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
[1]834
[18927]835 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
[13833]836 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
[1]837
[9228]838 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
[1]839
[31064]840 if ( rc == VERR_PAGE_NOT_PRESENT
[30572]841 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
[1]842 {
[9228]843 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
[1]844
845 Log(("PATM: Patch page not present -> check later!\n"));
[55889]846 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
847 pPage,
848 pPage + (PAGE_SIZE - 1) /* inclusive! */,
[71720]849 (void *)(uintptr_t)pPage, NIL_RTRCPTR /*pvUserRC*/, NULL /*pszDesc*/);
[13833]850 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
[1]851 }
852 else
853 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
854 {
855 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
856 /*
857 * Disable patch; this is not a good solution
858 */
[63560]859 /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
[1]860 pPatch->patch.uState = PATCH_DISABLED;
861 }
862 else
[13833]863 if (RT_SUCCESS(rc))
[1]864 {
[9228]865 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
[18927]866 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
[1]867 AssertRC(rc);
868 }
869 }
870 break;
871
872 case FIXUP_REL_JMPTOPATCH:
873 {
[9228]874 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
[1]875
876 if ( pPatch->patch.uState == PATCH_ENABLED
877 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
878 {
879 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
880 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
[9228]881 RTRCPTR pJumpOffGC;
882 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
883 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
[1]884
[31064]885#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
[1]886 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
[31064]887#else
888 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
889#endif
[1]890
891 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
892#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
893 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
894 {
895 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
896
897 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
898 oldJump[0] = pPatch->patch.aPrivInstr[0];
899 oldJump[1] = pPatch->patch.aPrivInstr[1];
[9228]900 *(RTRCUINTPTR *)&oldJump[2] = displOld;
[1]901 }
902 else
903#endif
904 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
905 {
906 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
907 oldJump[0] = 0xE9;
[9228]908 *(RTRCUINTPTR *)&oldJump[1] = displOld;
[1]909 }
910 else
911 {
912 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
913 continue; //this should never happen!!
914 }
915 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
916
917 /*
918 * Read old patch jump and compare it to the one we previously installed
919 */
[18927]920 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
[13833]921 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
[1]922
[31064]923 if ( rc == VERR_PAGE_NOT_PRESENT
[30572]924 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
[1]925 {
[9228]926 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
[55889]927 Log(("PATM: Patch page not present -> check later!\n"));
928 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
929 pPage,
930 pPage + (PAGE_SIZE - 1) /* inclusive! */,
[71720]931 (void *)(uintptr_t)pPage, NIL_RTRCPTR /*pvUserRC*/, NULL /*pszDesc*/);
[13833]932 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
[1]933 }
934 else
935 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
936 {
937 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
938 /*
939 * Disable patch; this is not a good solution
940 */
[63560]941 /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
[1]942 pPatch->patch.uState = PATCH_DISABLED;
943 }
944 else
[13833]945 if (RT_SUCCESS(rc))
[1]946 {
[18927]947 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
[1]948 AssertRC(rc);
949 }
950 else
951 AssertMsgFailed(("Unexpected error %d from PGMPhysSimpleReadGCPtr\n", rc));
952 }
953 else
[30572]954 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
[1]955
956 pRec->pDest = pTarget;
957 break;
958 }
959
960 case FIXUP_REL_JMPTOGUEST:
961 {
[9228]962 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
963 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
[1]964
965 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
966 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
[9228]967 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
[1]968 pRec->pSource = pSource;
969 break;
970 }
971
[54763]972 case FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL:
[54761]973 case FIXUP_CONSTANT_IN_PATCH_ASM_TMPL:
974 /* Only applicable when loading state. */
975 Assert(pRec->pDest == pRec->pSource);
[54764]976 Assert(PATM_IS_ASMFIX(pRec->pSource));
[54761]977 break;
978
[1]979 default:
980 AssertMsg(0, ("Invalid fixup type!!\n"));
981 return VERR_INVALID_PARAMETER;
982 }
983 }
984
985 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
[41671]986 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
[1]987 return 0;
988}
989
[44362]990#ifdef VBOX_WITH_DEBUGGER
[1]991
992/**
[9228]993 * Callback function for RTAvloU32DoWithAll
[1]994 *
995 * Enables the patch that's being enumerated
996 *
997 * @returns 0 (continue enumeration).
998 * @param pNode Current node
[58122]999 * @param pVM The cross context VM structure.
[1]1000 */
[9228]1001static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
[1]1002{
1003 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1004
[9228]1005 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
[1]1006 return 0;
1007}
1008
1009
1010/**
[9228]1011 * Callback function for RTAvloU32DoWithAll
[1]1012 *
1013 * Disables the patch that's being enumerated
1014 *
1015 * @returns 0 (continue enumeration).
1016 * @param pNode Current node
[58122]1017 * @param pVM The cross context VM structure.
[1]1018 */
[9228]1019static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
[1]1020{
1021 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1022
[9228]1023 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
[1]1024 return 0;
1025}
1026
[44362]1027#endif /* VBOX_WITH_DEBUGGER */
1028
[1]1029/**
1030 * Returns the host context pointer of the GC context structure
1031 *
1032 * @returns Host context pointer to the GC state structure, or NULL if raw-mode is not enabled.
[58122]1033 * @param pVM The cross context VM structure.
[1]1034 */
[44362]1035VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
[1]1036{
[70948]1037 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), NULL);
[1]1038 return pVM->patm.s.pGCStateHC;
1039}
1040
1041
1042/**
1043 * Allows or disallows patching of privileged instructions executed by the guest OS.
1044 *
1045 * @returns VBox status code.
[44362]1046 * @param pUVM The user mode VM handle.
1047 * @param fAllowPatching Allow/disallow patching
[1]1048 */
[44362]1049VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
[1]1050{
[44362]1051 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1052 PVM pVM = pUVM->pVM;
1053 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1054
[70948]1055 if (VM_IS_RAW_MODE_ENABLED(pVM))
[45620]1056 pVM->fPATMEnabled = fAllowPatching;
1057 else
1058 Assert(!pVM->fPATMEnabled);
[1]1059 return VINF_SUCCESS;
1060}
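/*
 * Minimal usage sketch: a ring-3 caller holding the user mode VM handle can
 * toggle patching at runtime; when raw-mode is not enabled the call leaves
 * the (already false) fPATMEnabled flag alone.
 *
 * @code
 *    int rc = PATMR3AllowPatching(pUVM, false /* fAllowPatching */);
 * @endcode
 */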
1061
[44362]1062
[1]1063/**
[44362]1064 * Checks if the patch manager is enabled or not.
[1]1065 *
[44362]1066 * @returns true if enabled, false if not (or if invalid handle).
1067 * @param pUVM The user mode VM handle.
[1]1068 */
[44362]1069VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
[1]1070{
[44362]1071 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1072 PVM pVM = pUVM->pVM;
1073 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1074 return PATMIsEnabled(pVM);
[1]1075}
1076
[44362]1077
[1]1078/**
[44362]1079 * Convert a GC patch block pointer to a HC patch pointer
[1]1080 *
[44362]1081 * @returns HC pointer or NULL if it's not a GC patch pointer
[58122]1082 * @param pVM The cross context VM structure.
[44362]1083 * @param pAddrGC GC pointer
[1]1084 */
[44362]1085VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
[1]1086{
[70948]1087 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), NULL);
[54763]1088 RTRCUINTPTR offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1089 if (offPatch >= pVM->patm.s.cbPatchMem)
1090 {
1091 offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC;
1092 if (offPatch >= pVM->patm.s.cbPatchHelpers)
1093 return NULL;
1094 return pVM->patm.s.pbPatchHelpersR3 + offPatch;
1095 }
1096 return pVM->patm.s.pPatchMemHC + offPatch;
[1]1097}
1098
1099
1100/**
1101 * Convert guest context address to host context pointer
1102 *
1103 * @returns Host context pointer or NULL in case of an error.
[58122]1104 * @param pVM The cross context VM structure.
[30572]1105 * @param pCacheRec Address conversion cache record
[1]1106 * @param pGCPtr Guest context pointer
1107 *
1110 */
[44362]1111R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
[1]1112{
1113 int rc;
[4776]1114 R3PTRTYPE(uint8_t *) pHCPtr;
[1]1115 uint32_t offset;
1116
[54763]1117 offset = (RTRCUINTPTR)pGCPtr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1118 if (offset < pVM->patm.s.cbPatchMem)
[1]1119 {
[54763]1120#ifdef VBOX_STRICT
[30572]1121 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
[54763]1122 Assert(pPatch); Assert(offset - pPatch->pPatchBlockOffset < pPatch->cbPatchBlockSize);
1123#endif
1124 return pVM->patm.s.pPatchMemHC + offset;
[1]1125 }
[54763]1126 /* Note! We're _not_ including the patch helpers here. */
[1]1127
1128 offset = pGCPtr & PAGE_OFFSET_MASK;
[30572]1129 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1130 return pCacheRec->pPageLocStartHC + offset;
1131
1132 /* Release previous lock if any. */
1133 if (pCacheRec->Lock.pvMap)
[1]1134 {
[30572]1135 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1136 pCacheRec->Lock.pvMap = NULL;
[1]1137 }
1138
[30572]1139 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
[1]1140 if (rc != VINF_SUCCESS)
1141 {
1142 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("PGMPhysGCPtr2CCPtrReadOnly failed for %08X\n", pGCPtr));
1143 return NULL;
1144 }
[30572]1145 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1146 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
[1]1147 return pHCPtr;
1148}
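/*
 * Minimal usage sketch (variable names are made up): callers hand in a zeroed
 * PATMP2GLOOKUPREC with pPatch set and should release the page mapping lock
 * the function may still be holding, the same way the function itself does
 * when it switches to another page.
 *
 * @code
 *    PATMP2GLOOKUPREC CacheRec;
 *    RT_ZERO(CacheRec);
 *    CacheRec.pPatch = pPatch;
 *    uint8_t *pbInstrHC = patmR3GCVirtToHCVirt(pVM, &CacheRec, pInstrGC);
 *    ...
 *    if (CacheRec.Lock.pvMap)
 *        PGMPhysReleasePageMappingLock(pVM, &CacheRec.Lock);
 * @endcode
 */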
1149
1150
[45620]1151/**
1152 * Calculates and fills in all branch targets
[1]1153 *
1154 * @returns VBox status code.
[58122]1155 * @param pVM The cross context VM structure.
[1]1156 * @param pPatch Current patch block pointer
1157 *
1158 */
1159static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1160{
1161 int32_t displ;
1162
1163 PJUMPREC pRec = 0;
[30575]1164 unsigned nrJumpRecs = 0;
[1]1165
1166 /*
1167 * Set all branch targets inside the patch block.
1168 * We remove all jump records as they are no longer needed afterwards.
1169 */
1170 while (true)
1171 {
[9212]1172 RCPTRTYPE(uint8_t *) pInstrGC;
1173 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
[1]1174
1175 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1176 if (pRec == 0)
1177 break;
1178
1179 nrJumpRecs++;
1180
1181 /* HC in patch block to GC in patch block. */
1182 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1183
1184 if (pRec->opcode == OP_CALL)
1185 {
1186 /* Special case: call function replacement patch from this patch block.
1187 */
[44362]1188 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
[12855]1189 if (!pFunctionRec)
[1]1190 {
1191 int rc;
1192
1193 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1194 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1195 else
1196 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1197
[13833]1198 if (RT_FAILURE(rc))
[1]1199 {
1200 uint8_t *pPatchHC;
[9228]1201 RTRCPTR pPatchGC;
1202 RTRCPTR pOrgInstrGC;
[1]1203
1204 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1205 Assert(pOrgInstrGC);
1206
1207 /* Failure for some reason -> mark exit point with int 3. */
[13834]1208 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
[1]1209
1210 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1211 Assert(pPatchGC);
1212
1213 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1214
1215 /* Set a breakpoint at the very beginning of the recompiled instruction */
1216 *pPatchHC = 0xCC;
1217
1218 continue;
1219 }
1220 }
[12855]1221 else
1222 {
[13834]1223 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
[12855]1224 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1225 }
1226
[1]1227 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1228 }
1229 else
1230 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1231
1232 if (pBranchTargetGC == 0)
1233 {
1234 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1235 return VERR_PATCHING_REFUSED;
1236 }
1237 /* Our jumps *always* have a dword displacement (to make things easier). */
[9228]1238 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1239 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1240 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1241 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
[1]1242 }
1243 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1244 Assert(pPatch->JumpTree == 0);
1245 return VINF_SUCCESS;
1246}
1247
[45620]1248/**
1249 * Add an illegal instruction record
[1]1250 *
[58122]1251 * @param pVM The cross context VM structure.
[1]1252 * @param pPatch Patch structure ptr
1253 * @param pInstrGC Guest context pointer to privileged instruction
1254 *
1255 */
[9228]1256static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
[1]1257{
1258 PAVLPVNODECORE pRec;
1259
1260 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1261 Assert(pRec);
[36912]1262 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
[1]1263
1264 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1265 Assert(ret); NOREF(ret);
1266 pPatch->pTempInfo->nrIllegalInstr++;
1267}
1268
[9228]1269static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
[1]1270{
1271 PAVLPVNODECORE pRec;
1272
[36912]1273 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
[1]1274 if (pRec)
1275 return true;
[30572]1276 else
1277 return false;
[1]1278}
1279
1280/**
1281 * Add a patch to guest lookup record
1282 *
[58122]1283 * @param pVM The cross context VM structure.
[1]1284 * @param pPatch Patch structure ptr
1285 * @param pPatchInstrHC Host context pointer into the patch block
1286 * @param pInstrGC Guest context pointer to privileged instruction
1287 * @param enmType Lookup type
1288 * @param fDirty Dirty flag
1289 *
[45620]1290 * @note Be extremely careful with this function. Make absolutely sure the guest
1291 * address is correct! (to avoid executing instructions twice!)
[1]1292 */
[44362]1293void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
[1]1294{
1295 bool ret;
1296 PRECPATCHTOGUEST pPatchToGuestRec;
1297 PRECGUESTTOPATCH pGuestToPatchRec;
[93]1298 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
[1]1299
[36669]1300 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1301 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1302
[1]1303 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1304 {
[93]1305 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1306 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
[1]1307 return; /* already there */
1308
1309 Assert(!pPatchToGuestRec);
1310 }
1311#ifdef VBOX_STRICT
1312 else
1313 {
[93]1314 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
[1]1315 Assert(!pPatchToGuestRec);
1316 }
1317#endif
1318
1319 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1320 Assert(pPatchToGuestRec);
[93]1321 pPatchToGuestRec->Core.Key = PatchOffset;
[1]1322 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1323 pPatchToGuestRec->enmType = enmType;
[2030]1324 pPatchToGuestRec->fDirty = fDirty;
[1]1325
[93]1326 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
[1]1327 Assert(ret);
1328
1329 /* GC to patch address */
1330 if (enmType == PATM_LOOKUP_BOTHDIR)
1331 {
[9228]1332 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
[1]1333 if (!pGuestToPatchRec)
1334 {
1335 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
[102]1336 pGuestToPatchRec->Core.Key = pInstrGC;
[1]1337 pGuestToPatchRec->PatchOffset = PatchOffset;
1338
[9228]1339 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
[1]1340 Assert(ret);
1341 }
1342 }
1343
1344 pPatch->nrPatch2GuestRecs++;
1345}
1346
1347
1348/**
1349 * Removes a patch to guest lookup record
1350 *
[58122]1351 * @param pVM The cross context VM structure.
[1]1352 * @param pPatch Patch structure ptr
1353 * @param pPatchInstrGC Guest context pointer to patch block
1354 */
[9228]1355void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
[1]1356{
[102]1357 PAVLU32NODECORE pNode;
[9228]1358 PAVLU32NODECORE pNode2;
[102]1359 PRECPATCHTOGUEST pPatchToGuestRec;
1360 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
[1]1361
[93]1362 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
[1]1363 Assert(pPatchToGuestRec);
1364 if (pPatchToGuestRec)
1365 {
1366 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1367 {
1368 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1369
1370 Assert(pGuestToPatchRec->Core.Key);
[9228]1371 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
[93]1372 Assert(pNode2);
[1]1373 }
[93]1374 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
[1]1375 Assert(pNode);
1376
1377 MMR3HeapFree(pPatchToGuestRec);
1378 pPatch->nrPatch2GuestRecs--;
1379 }
1380}
1381
1382
[93]1383/**
1384 * RTAvlPVDestroy callback.
1385 */
1386static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1387{
1388 MMR3HeapFree(pNode);
1389 return 0;
1390}
1391
1392/**
1393 * Empty the specified tree (PV tree, MMR3 heap)
[1]1394 *
[58122]1395 * @param pVM The cross context VM structure.
[1]1396 * @param ppTree Tree to empty
1397 */
[44362]1398static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
[1]1399{
[39078]1400 NOREF(pVM);
[93]1401 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1402}
[1]1403
1404
[93]1405/**
1406 * RTAvlU32Destroy callback.
1407 */
1408static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1409{
1410 MMR3HeapFree(pNode);
1411 return 0;
[1]1412}
1413
[93]1414/**
1415 * Empty the specified tree (U32 tree, MMR3 heap)
1416 *
[58122]1417 * @param pVM The cross context VM structure.
[93]1418 * @param ppTree Tree to empty
1419 */
[44362]1420static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
[93]1421{
[39078]1422 NOREF(pVM);
[93]1423 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1424}
[1]1425
[93]1426
[1]1427/**
1428 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1429 *
1430 * @returns VBox status code.
[58122]1431 * @param pVM The cross context VM structure.
[1]1432 * @param pCpu CPU disassembly state
1433 * @param pInstrGC Guest context pointer to privileged instruction
1434 * @param pCurInstrGC Guest context pointer to the current instruction
[30572]1435 * @param pCacheRec Cache record ptr
[1]1436 *
1437 */
[30572]1438static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
[1]1439{
[30572]1440 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
[1]1441 bool fIllegalInstr = false;
1442
[36801]1443 /*
1444 * Preliminary heuristics:
1445 * - no call instructions without a fixed displacement between cli and sti/popf
1446 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1447 * - no nested pushf/cli
1448 * - sti/popf should be the (eventual) target of all branches
1449 * - no near or far returns; no int xx, no into
1450 *
1451 * Note: Later on we can impose less strict guidelines if the need arises
1452 */
[1]1453
1454 /* Bail out if the patch gets too big. */
1455 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1456 {
[13834]1457 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
[1]1458 fIllegalInstr = true;
1459 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1460 }
1461 else
1462 {
[33540]1463 /* No unconditional jumps or calls without fixed displacements. */
[41738]1464 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
[41737]1465 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
[1]1466 )
1467 {
[41739]1468 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1469 if ( pCpu->Param1.cb == 6 /* far call/jmp */
[41737]1470 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
[41738]1471 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
[1]1472 )
1473 {
1474 fIllegalInstr = true;
1475 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1476 }
1477 }
1478
1479 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
[41737]1480 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
[1]1481 {
[36801]1482 if ( pCurInstrGC > pPatch->pPrivInstrGC
[41732]1483 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
[1]1484 {
1485 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1486 /* We turn this one into a int 3 callable patch. */
1487 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1488 }
1489 }
1490 else
1491 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1492 if (pPatch->opcode == OP_PUSHF)
1493 {
[41737]1494 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
[1]1495 {
1496 fIllegalInstr = true;
1497 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1498 }
1499 }
1500
[36801]1501 /* no far returns */
[41737]1502 if (pCpu->pCurInstr->uOpcode == OP_RETF)
[1]1503 {
1504 pPatch->pTempInfo->nrRetInstr++;
1505 fIllegalInstr = true;
1506 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1507 }
[41737]1508 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1509 || pCpu->pCurInstr->uOpcode == OP_INT
1510 || pCpu->pCurInstr->uOpcode == OP_INTO)
[1]1511 {
[36801]1512 /* No int xx or into either. */
[1]1513 fIllegalInstr = true;
1514 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1515 }
1516 }
1517
[41732]1518 pPatch->cbPatchBlockSize += pCpu->cbInstr;
[1]1519
1520 /* Illegal instruction -> end of analysis phase for this code block */
1521 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1522 return VINF_SUCCESS;
1523
1524 /* Check for exit points. */
[41737]1525 switch (pCpu->pCurInstr->uOpcode)
[1]1526 {
1527 case OP_SYSEXIT:
1528 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1529
1530 case OP_SYSENTER:
1531 case OP_ILLUD2:
[36801]1532 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
[1]1533 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1534 return VINF_SUCCESS;
1535
1536 case OP_STI:
1537 case OP_POPF:
1538 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1539 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1540 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1541 {
1542 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1543 return VERR_PATCHING_REFUSED;
1544 }
1545 if (pPatch->opcode == OP_PUSHF)
1546 {
[41737]1547 if (pCpu->pCurInstr->uOpcode == OP_POPF)
[1]1548 {
1549 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1550 return VINF_SUCCESS;
1551
1552 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1553 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1554 pPatch->flags |= PATMFL_CHECK_SIZE;
1555 }
[36801]1556 break; /* sti doesn't mark the end of a pushf block; only popf does. */
[1]1557 }
[69046]1558 RT_FALL_THRU();
[1]1559 case OP_RETN: /* exit point for function replacement */
1560 return VINF_SUCCESS;
1561
1562 case OP_IRET:
1563 return VINF_SUCCESS; /* exit point */
1564
1565 case OP_CPUID:
1566 case OP_CALL:
1567 case OP_JMP:
1568 break;
1569
[45485]1570#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
[45276]1571 case OP_STR:
1572 break;
1573#endif
1574
[1]1575 default:
[41738]1576 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
[1]1577 {
1578 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1579 return VINF_SUCCESS; /* exit point */
1580 }
1581 break;
1582 }
1583
[36801]1584 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
[41738]1585 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
[1]1586 {
[36801]1587 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
[41732]1588 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
[1]1589 return VINF_SUCCESS;
1590 }
1591
1592 return VWRN_CONTINUE_ANALYSIS;
1593}
1594
1595/**
1596 * Analyses the instructions inside a function for compliance
1597 *
1598 * @returns VBox status code.
[58122]1599 * @param pVM The cross context VM structure.
[1]1600 * @param pCpu CPU disassembly state
1601 * @param pInstrGC Guest context pointer to privileged instruction
1602 * @param pCurInstrGC Guest context pointer to the current instruction
[30572]1603 * @param pCacheRec Cache record ptr
[1]1604 *
1605 */
[30572]1606static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
[1]1607{
[30572]1608 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
[1]1609 bool fIllegalInstr = false;
[39078]1610 NOREF(pInstrGC);
[1]1611
1612 //Preliminary heuristics:
1613 //- no call instructions
1614 //- ret ends a block
1615
1616 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1617
1618 // bail out if the patch gets too big
1619 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1620 {
[13834]1621 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
[1]1622 fIllegalInstr = true;
1623 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1624 }
1625 else
1626 {
[33540]1627 // no unconditional jumps or calls without fixed displacements
[41738]1628 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
[41737]1629 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
[1]1630 )
1631 {
[41739]1632 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1633 if ( pCpu->Param1.cb == 6 /* far call/jmp */
[41737]1634 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
[41738]1635 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
[1]1636 )
1637 {
1638 fIllegalInstr = true;
1639 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1640 }
1641 }
1642 else /* no far returns */
[41737]1643 if (pCpu->pCurInstr->uOpcode == OP_RETF)
[1]1644 {
1645 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1646 fIllegalInstr = true;
1647 }
1648 else /* no int xx or into either */
[41737]1649 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
[1]1650 {
1651 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1652 fIllegalInstr = true;
1653 }
1654
1655 #if 0
[63560]1656 /// @todo we can handle certain in/out and privileged instructions in the guest context
[41738]1657 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
[1]1658 {
1659 Log(("Illegal instructions for function patch!!\n"));
1660 return VERR_PATCHING_REFUSED;
1661 }
1662 #endif
1663 }
1664
[41732]1665 pPatch->cbPatchBlockSize += pCpu->cbInstr;
[1]1666
1667 /* Illegal instruction -> end of analysis phase for this code block */
1668 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1669 {
1670 return VINF_SUCCESS;
1671 }
1672
1673 // Check for exit points
[41737]1674 switch (pCpu->pCurInstr->uOpcode)
[1]1675 {
1676 case OP_ILLUD2:
1677 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1678 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1679 return VINF_SUCCESS;
1680
1681 case OP_IRET:
1682 case OP_SYSEXIT: /* will fault or emulated in GC */
1683 case OP_RETN:
1684 return VINF_SUCCESS;
1685
[45485]1686#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
[45276]1687 case OP_STR:
1688 break;
1689#endif
1690
[1]1691 case OP_POPF:
1692 case OP_STI:
1693 return VWRN_CONTINUE_ANALYSIS;
1694 default:
[41738]1695 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
[1]1696 {
1697 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1698 return VINF_SUCCESS; /* exit point */
1699 }
1700 return VWRN_CONTINUE_ANALYSIS;
1701 }
1702
1703 return VWRN_CONTINUE_ANALYSIS;
1704}
1705
1706/**
1707 * Recompiles the instructions in a code block
1708 *
1709 * @returns VBox status code.
[58122]1710 * @param pVM The cross context VM structure.
[1]1711 * @param pCpu CPU disassembly state
1712 * @param pInstrGC Guest context pointer to privileged instruction
1713 * @param pCurInstrGC Guest context pointer to the current instruction
[30572]1714 * @param pCacheRec Cache record ptr
[1]1715 *
1716 */
[57389]1717static DECLCALLBACK(int) patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
[1]1718{
[30572]1719 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1720 int rc = VINF_SUCCESS;
1721 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
[1]1722
[13834]1723 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
[1]1724
1725 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1726 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1727 {
1728 /*
1729 * Been there, done that; so insert a jump (we don't want to duplicate code)
1730 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1731 */
[13834]1732 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
[41734]1733 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
[1]1734 }
1735
1736 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1737 {
[30572]1738 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
[1]1739 }
1740 else
[30572]1741 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
[1]1742
[13833]1743 if (RT_FAILURE(rc))
[1]1744 return rc;
1745
[31437]1746 /* Note: Never do a direct return unless a failure is encountered! */
[1]1747
1748 /* Clear recompilation of next instruction flag; we are doing that right here. */
1749 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1750 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1751
1752 /* Add lookup record for patch to guest address translation */
[44362]1753 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
[1]1754
1755 /* Update lowest and highest instruction address for this patch */
1756 if (pCurInstrGC < pPatch->pInstrGCLowest)
1757 pPatch->pInstrGCLowest = pCurInstrGC;
1758 else
1759 if (pCurInstrGC > pPatch->pInstrGCHighest)
[41732]1760 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
[1]1761
1762 /* Illegal instruction -> end of recompile phase for this code block. */
1763 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1764 {
[13834]1765 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
[1]1766 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1767 goto end;
1768 }
1769
1770 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1771 * Indirect calls are handled below.
1772 */
[41738]1773 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
[41737]1774 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
[41738]1775 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
[1]1776 {
[9212]1777 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
[1]1778 if (pTargetGC == 0)
1779 {
[41739]1780 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
[1]1781 return VERR_PATCHING_REFUSED;
1782 }
1783
[41737]1784 if (pCpu->pCurInstr->uOpcode == OP_CALL)
[1]1785 {
[347]1786 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
[1]1787 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
[13833]1788 if (RT_FAILURE(rc))
[1]1789 goto end;
1790 }
1791 else
[41737]1792 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
[1]1793
[13833]1794 if (RT_SUCCESS(rc))
[1]1795 rc = VWRN_CONTINUE_RECOMPILE;
1796
1797 goto end;
1798 }
1799
[41737]1800 switch (pCpu->pCurInstr->uOpcode)
[1]1801 {
1802 case OP_CLI:
1803 {
1804 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1805 * until we've found the proper exit point(s).
1806 */
1807 if ( pCurInstrGC != pInstrGC
1808 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1809 )
1810 {
1811 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1812 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1813 }
1814 /* Set by irq inhibition; no longer valid now. */
1815 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1816
1817 rc = patmPatchGenCli(pVM, pPatch);
[13833]1818 if (RT_SUCCESS(rc))
[1]1819 rc = VWRN_CONTINUE_RECOMPILE;
1820 break;
1821 }
1822
1823 case OP_MOV:
[41738]1824 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
[1]1825 {
1826 /* mov ss, src? */
[41739]1827 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
[41744]1828 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
[1]1829 {
[13834]1830 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
[1]1831 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
[2043]1832 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
[1]1833 }
[2049]1834#if 0 /* necessary for Haiku */
[2043]1835 else
[41739]1836 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
[41744]1837 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
[41739]1838 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
[2043]1839 {
1840 /* mov GPR, ss */
[2046]1841 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
[13833]1842 if (RT_SUCCESS(rc))
[2043]1843 rc = VWRN_CONTINUE_RECOMPILE;
1844 break;
1845 }
[2049]1846#endif
[1]1847 }
1848 goto duplicate_instr;
1849
1850 case OP_POP:
[45276]1851 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
[41738]1852 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
[1]1853 {
[41738]1854 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
[1]1855
[13834]1856 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
[1]1857 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1858 }
1859 goto duplicate_instr;
1860
1861 case OP_STI:
1862 {
[9228]1863 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
[1]1864
[25777]1865 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
[1]1866 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1867 {
1868 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1869 fInhibitIRQInstr = true;
[41732]1870 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
[13834]1871 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
[1]1872 }
1873 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1874
[13833]1875 if (RT_SUCCESS(rc))
[1]1876 {
1877 DISCPUSTATE cpu = *pCpu;
[41732]1878 unsigned cbInstr;
[1]1879 int disret;
[25777]1880 RCPTRTYPE(uint8_t *) pReturnInstrGC;
[1]1881
1882 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1883
[41732]1884 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
[30572]1885 { /* Force pNextInstrHC out of scope after using it */
[44362]1886 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
[30572]1887 if (pNextInstrHC == NULL)
1888 {
1889 AssertFailed();
1890 return VERR_PATCHING_REFUSED;
1891 }
1892
1893 // Disassemble the next instruction
[41732]1894 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
[1]1895 }
1896 if (disret == false)
1897 {
1898 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1899 return VERR_PATCHING_REFUSED;
1900 }
[41732]1901 pReturnInstrGC = pNextInstrGC + cbInstr;
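 /* Refuse the patch if the instruction fused with sti ends inside the SIZEOF_NEARJUMP32 bytes at pInstrGC that the patch jump will overwrite (does not apply to function duplication patches). */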
[1]1902
1903 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1904 || pReturnInstrGC <= pInstrGC
1905 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1906 )
1907 {
1908 /* Not an exit point for function duplication patches */
1909 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
[13833]1910 && RT_SUCCESS(rc))
[1]1911 {
1912 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1913 rc = VWRN_CONTINUE_RECOMPILE;
1914 }
1915 else
1916 rc = VINF_SUCCESS; //exit point
1917 }
1918 else {
1919 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1920 rc = VERR_PATCHING_REFUSED; //not allowed!!
1921 }
1922 }
1923 break;
1924 }
1925
1926 case OP_POPF:
1927 {
[41732]1928 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
[1]1929
1930 /* Not an exit point for IDT handler or function replacement patches */
[4057]1931 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
[1]1932 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1933 fGenerateJmpBack = false;
1934
[41734]1935 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
[13833]1936 if (RT_SUCCESS(rc))
[1]1937 {
1938 if (fGenerateJmpBack == false)
1939 {
1940 /* Not an exit point for IDT handler or function replacement patches */
1941 rc = VWRN_CONTINUE_RECOMPILE;
1942 }
1943 else
1944 {
1945 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1946 rc = VINF_SUCCESS; /* exit point! */
1947 }
1948 }
1949 break;
1950 }
1951
1952 case OP_PUSHF:
[41734]1953 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
[13833]1954 if (RT_SUCCESS(rc))
[1]1955 rc = VWRN_CONTINUE_RECOMPILE;
1956 break;
1957
1958 case OP_PUSH:
[45276]1959 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
[41738]1960 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
[1]1961 {
1962 rc = patmPatchGenPushCS(pVM, pPatch);
[13833]1963 if (RT_SUCCESS(rc))
[1]1964 rc = VWRN_CONTINUE_RECOMPILE;
1965 break;
1966 }
1967 goto duplicate_instr;
1968
1969 case OP_IRET:
[13834]1970 Log(("IRET at %RRv\n", pCurInstrGC));
[41734]1971 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
[13833]1972 if (RT_SUCCESS(rc))
[1]1973 {
1974 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1975 rc = VINF_SUCCESS; /* exit point by definition */
1976 }
1977 break;
1978
1979 case OP_ILLUD2:
1980 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1981 rc = patmPatchGenIllegalInstr(pVM, pPatch);
[13833]1982 if (RT_SUCCESS(rc))
[1]1983 rc = VINF_SUCCESS; /* exit point by definition */
1984 Log(("Illegal opcode (0xf 0xb)\n"));
1985 break;
1986
1987 case OP_CPUID:
1988 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
[13833]1989 if (RT_SUCCESS(rc))
[1]1990 rc = VWRN_CONTINUE_RECOMPILE;
1991 break;
1992
1993 case OP_STR:
[63570]1994#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into ifndef */
[45276]1995 /* Now safe because our shadow TR entry is identical to the guest's. */
1996 goto duplicate_instr;
1997#endif
[1]1998 case OP_SLDT:
1999 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
[13833]2000 if (RT_SUCCESS(rc))
[1]2001 rc = VWRN_CONTINUE_RECOMPILE;
2002 break;
2003
2004 case OP_SGDT:
2005 case OP_SIDT:
2006 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
[13833]2007 if (RT_SUCCESS(rc))
[1]2008 rc = VWRN_CONTINUE_RECOMPILE;
2009 break;
2010
2011 case OP_RETN:
2012 /* retn is an exit point for function patches */
2013 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
[13833]2014 if (RT_SUCCESS(rc))
[1]2015 rc = VINF_SUCCESS; /* exit point by definition */
2016 break;
2017
2018 case OP_SYSEXIT:
2019 /* Duplicate it, so it can be emulated in GC (or fault). */
2020 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
[13833]2021 if (RT_SUCCESS(rc))
[1]2022 rc = VINF_SUCCESS; /* exit point by definition */
2023 break;
2024
2025 case OP_CALL:
2026 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2027 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2028 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2029 */
[41739]2030 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2031 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
[1]2032 {
[9228]2033 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
[13833]2034 if (RT_SUCCESS(rc))
[1]2035 {
2036 rc = VWRN_CONTINUE_RECOMPILE;
2037 }
2038 break;
2039 }
2040 goto gen_illegal_instr;
2041
2042 case OP_JMP:
2043 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2044 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2045 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2046 */
[41739]2047 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2048 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
[1]2049 {
2050 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
[13833]2051 if (RT_SUCCESS(rc))
[1]2052 rc = VINF_SUCCESS; /* end of branch */
2053 break;
2054 }
2055 goto gen_illegal_instr;
2056
2057 case OP_INT3:
2058 case OP_INT:
2059 case OP_INTO:
2060 goto gen_illegal_instr;
2061
2062 case OP_MOV_DR:
[31437]2063 /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
[41738]2064 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
[1]2065 {
2066 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
[13833]2067 if (RT_SUCCESS(rc))
[1]2068 rc = VWRN_CONTINUE_RECOMPILE;
2069 break;
2070 }
2071 goto duplicate_instr;
2072
2073 case OP_MOV_CR:
[31437]2074 /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
[41738]2075 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
[1]2076 {
2077 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
[13833]2078 if (RT_SUCCESS(rc))
[1]2079 rc = VWRN_CONTINUE_RECOMPILE;
2080 break;
2081 }
2082 goto duplicate_instr;
2083
2084 default:
[41738]2085 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
[1]2086 {
2087gen_illegal_instr:
2088 rc = patmPatchGenIllegalInstr(pVM, pPatch);
[13833]2089 if (RT_SUCCESS(rc))
[1]2090 rc = VINF_SUCCESS; /* exit point by definition */
2091 }
2092 else
2093 {
2094duplicate_instr:
2095 Log(("patmPatchGenDuplicate\n"));
2096 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
[13833]2097 if (RT_SUCCESS(rc))
[1]2098 rc = VWRN_CONTINUE_RECOMPILE;
2099 }
2100 break;
2101 }
2102
2103end:
2104
2105 if ( !fInhibitIRQInstr
2106 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2107 {
2108 int rc2;
[41732]2109 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
[1]2110
2111 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
[13834]2112 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
[1]2113 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2114 {
[13834]2115 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
[1]2116
2117 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2118 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2119 rc = VINF_SUCCESS; /* end of the line */
2120 }
2121 else
2122 {
2123 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2124 }
[13833]2125 if (RT_FAILURE(rc2))
[1]2126 rc = rc2;
2127 }
2128
[13833]2129 if (RT_SUCCESS(rc))
[1]2130 {
2131 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2132 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
[41732]2133 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
[41738]2134 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
[1]2135 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2136 )
2137 {
[41732]2138 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
[1]2139
2140 // The end marker for this kind of patch is any instruction at a location outside our patch jump
[41732]2141 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
[1]2142
2143 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2144 AssertRC(rc);
2145 }
2146 }
2147 return rc;
2148}
2149
2150
2151#ifdef LOG_ENABLED
2152
[45620]2153/**
2154 * Add a disasm jump record (temporary, to prevent duplicate analysis)
[1]2155 *
[58122]2156 * @param pVM The cross context VM structure.
[1]2157 * @param pPatch Patch structure ptr
2158 * @param pInstrGC Guest context pointer to privileged instruction
2159 *
2160 */
[9228]2161static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
[1]2162{
2163 PAVLPVNODECORE pRec;
2164
2165 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2166 Assert(pRec);
[36969]2167 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
[1]2168
2169 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2170 Assert(ret);
2171}
2172
2173/**
2174 * Checks if jump target has been analysed before.
2175 *
2176 * @returns true if the jump target has been analysed before, false otherwise.
2177 * @param pPatch Patch struct
2178 * @param pInstrGC Jump target
2179 *
2180 */
[9228]2181static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
[1]2182{
2183 PAVLPVNODECORE pRec;
2184
[36969]2185 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
[1]2186 if (pRec)
2187 return true;
2188 return false;
2189}
2190
2191/**
2192 * For proper disassembly of the final patch block
2193 *
2194 * @returns VBox status code.
[58122]2195 * @param pVM The cross context VM structure.
[1]2196 * @param pCpu CPU disassembly state
2197 * @param pInstrGC Guest context pointer to privileged instruction
2198 * @param pCurInstrGC Guest context pointer to the current instruction
[30572]2199 * @param pCacheRec Cache record ptr
[1]2200 *
2201 */
[57411]2202DECLCALLBACK(int) patmR3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC,
2203 RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
[1]2204{
[30572]2205 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
[39078]2206 NOREF(pInstrGC);
[1]2207
[41737]2208 if (pCpu->pCurInstr->uOpcode == OP_INT3)
[1]2209 {
2210 /* Could be an int3 inserted in a call patch. Check to be sure */
2211 DISCPUSTATE cpu;
[9228]2212 RTRCPTR pOrgJumpGC;
[1]2213
2214 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2215
[30572]2216 { /* Force pOrgJumpHC out of scope after using it */
[44362]2217 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
[1]2218
[41671]2219 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
[41739]2220 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
[30572]2221 return VINF_SUCCESS;
2222 }
[1]2223 return VWRN_CONTINUE_ANALYSIS;
2224 }
2225
[41737]2226 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
[1]2227 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2228 {
2229 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2230 return VWRN_CONTINUE_ANALYSIS;
2231 }
2232
[41737]2233 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2234 || pCpu->pCurInstr->uOpcode == OP_INT
2235 || pCpu->pCurInstr->uOpcode == OP_IRET
2236 || pCpu->pCurInstr->uOpcode == OP_RETN
2237 || pCpu->pCurInstr->uOpcode == OP_RETF
[1]2238 )
2239 {
2240 return VINF_SUCCESS;
2241 }
2242
[41737]2243 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
[1]2244 return VINF_SUCCESS;
2245
2246 return VWRN_CONTINUE_ANALYSIS;
2247}
2248
2249
2250/**
2251 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2252 *
2253 * @returns VBox status code.
[58122]2254 * @param pVM The cross context VM structure.
[1]2255 * @param pInstrGC Guest context pointer to the initial privileged instruction
2256 * @param pCurInstrGC Guest context pointer to the current instruction
2257 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
[30572]2258 * @param pCacheRec Cache record ptr
[1]2259 *
2260 */
[30572]2261int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
[1]2262{
2263 DISCPUSTATE cpu;
[30572]2264 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
[1]2265 int rc = VWRN_CONTINUE_ANALYSIS;
[41732]2266 uint32_t cbInstr, delta;
[4776]2267 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
[1]2268 bool disret;
2269 char szOutput[256];
2270
2271 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2272
2273 /* We need this to determine branch targets (and for disassembling). */
2274 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2275
[41671]2276 while (rc == VWRN_CONTINUE_ANALYSIS)
[1]2277 {
[44362]2278 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
[1]2279 if (pCurInstrHC == NULL)
2280 {
2281 rc = VERR_PATCHING_REFUSED;
2282 goto end;
2283 }
2284
[41671]2285 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
[41732]2286 &cpu, &cbInstr, szOutput, sizeof(szOutput));
[1]2287 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2288 {
[9228]2289 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
[1]2290
2291 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
[13834]2292 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
[1]2293 else
2294 Log(("DIS %s", szOutput));
2295
2296 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2297 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2298 {
2299 rc = VINF_SUCCESS;
2300 goto end;
2301 }
2302 }
2303 else
2304 Log(("DIS: %s", szOutput));
2305
2306 if (disret == false)
2307 {
2308 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2309 rc = VINF_SUCCESS;
2310 goto end;
2311 }
2312
[30572]2313 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
[1]2314 if (rc != VWRN_CONTINUE_ANALYSIS) {
2315 break; //done!
2316 }
2317
2318 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
[41738]2319 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2320 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
[41737]2321 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
[1]2322 )
2323 {
[9228]2324 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2325 RTRCPTR pOrgTargetGC;
[1]2326
2327 if (pTargetGC == 0)
2328 {
[41739]2329 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
[1]2330 rc = VERR_PATCHING_REFUSED;
2331 break;
2332 }
2333
2334 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2335 {
2336 //jump back to guest code
2337 rc = VINF_SUCCESS;
2338 goto end;
2339 }
2340 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2341
2342 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2343 {
2344 rc = VINF_SUCCESS;
2345 goto end;
2346 }
2347
2348 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2349 {
2350 /* New jump, let's check it. */
2351 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2352
[41737]2353 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
[30572]2354 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
[41737]2355 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
[1]2356
2357 if (rc != VINF_SUCCESS) {
2358 break; //done!
2359 }
2360 }
[41737]2361 if (cpu.pCurInstr->uOpcode == OP_JMP)
[1]2362 {
2363 /* Unconditional jump; return to caller. */
2364 rc = VINF_SUCCESS;
2365 goto end;
2366 }
2367
2368 rc = VWRN_CONTINUE_ANALYSIS;
2369 }
[41732]2370 pCurInstrGC += cbInstr;
[1]2371 }
2372end:
2373 return rc;
2374}
2375
2376/**
2377 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2378 *
2379 * @returns VBox status code.
[58122]2380 * @param pVM The cross context VM structure.
[1]2381 * @param pInstrGC Guest context pointer to the initial privileged instruction
2382 * @param pCurInstrGC Guest context pointer to the current instruction
2383 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
[30572]2384 * @param pCacheRec Cache record ptr
[1]2385 *
2386 */
[30572]2387int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
[1]2388{
[30572]2389 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
[1]2390
[30572]2391 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
[1]2392 /* Free all disasm jump records. */
2393 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2394 return rc;
2395}
2396
2397#endif /* LOG_ENABLED */
2398
2399/**
2400 * Detects if the specified address falls within a 5 byte jump generated for an active patch.
2401 * If so, this patch is permanently disabled.
2402 *
[58122]2403 * @param pVM The cross context VM structure.
[1]2404 * @param pInstrGC Guest context pointer to instruction
2405 * @param pConflictGC Guest context pointer to check
2406 *
2407 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2408 *
2409 */
[44362]2410VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
[1]2411{
[70948]2412 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PATCH_NO_CONFLICT);
[44362]2413 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
[1]2414 if (pTargetPatch)
2415 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2416 return VERR_PATCH_NO_CONFLICT;
2417}
2418
2419/**
2420 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2421 *
2422 * @returns VBox status code.
[58122]2423 * @param pVM The cross context VM structure.
[1]2424 * @param pInstrGC Guest context pointer to privileged instruction
2425 * @param pCurInstrGC Guest context pointer to the current instruction
2426 * @param pfnPATMR3Recompile Callback for recompiling the disassembled instruction
[30572]2427 * @param pCacheRec Cache record ptr
[1]2428 *
2429 */
[30572]2430static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
[1]2431{
2432 DISCPUSTATE cpu;
[30572]2433 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
[1]2434 int rc = VWRN_CONTINUE_ANALYSIS;
[41732]2435 uint32_t cbInstr;
[4776]2436 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
[1]2437 bool disret;
2438#ifdef LOG_ENABLED
2439 char szOutput[256];
2440#endif
2441
2442 while (rc == VWRN_CONTINUE_RECOMPILE)
2443 {
[44362]2444 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
[1]2445 if (pCurInstrHC == NULL)
2446 {
2447 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2448 goto end;
2449 }
2450#ifdef LOG_ENABLED
[41671]2451 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
[41732]2452 &cpu, &cbInstr, szOutput, sizeof(szOutput));
[1]2453 Log(("Recompile: %s", szOutput));
2454#else
[41732]2455 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
[1]2456#endif
2457 if (disret == false)
2458 {
2459 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2460
2461 /* Add lookup record for patch to guest address translation */
[44362]2462 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
[1]2463 patmPatchGenIllegalInstr(pVM, pPatch);
[1625]2464 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
[1]2465 goto end;
2466 }
2467
[30572]2468 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
[1]2469 if (rc != VWRN_CONTINUE_RECOMPILE)
2470 {
2471 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2472 if ( rc == VINF_SUCCESS
2473 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2474 {
2475 DISCPUSTATE cpunext;
2476 uint32_t opsizenext;
2477 uint8_t *pNextInstrHC;
[41732]2478 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
[1]2479
[13834]2480 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
[1]2481
2482 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2483 * Recompile the next instruction as well
2484 */
[44362]2485 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
[1]2486 if (pNextInstrHC == NULL)
2487 {
2488 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2489 goto end;
2490 }
[41671]2491 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
[1]2492 if (disret == false)
2493 {
2494 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2495 goto end;
2496 }
[41737]2497 switch(cpunext.pCurInstr->uOpcode)
[1]2498 {
2499 case OP_IRET: /* inhibit cleared in generated code */
2500 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2501 case OP_HLT:
2502 break; /* recompile these */
2503
2504 default:
[41738]2505 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
[1]2506 {
2507 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2508
2509 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2510 AssertRC(rc);
2511 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2512 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2513 }
2514 break;
2515 }
2516
[31437]2517 /* Note: after a cli we must continue to a proper exit point */
[41737]2518 if (cpunext.pCurInstr->uOpcode != OP_CLI)
[1]2519 {
[30572]2520 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
[13833]2521 if (RT_SUCCESS(rc))
[1]2522 {
2523 rc = VINF_SUCCESS;
2524 goto end;
2525 }
2526 break;
2527 }
2528 else
2529 rc = VWRN_CONTINUE_RECOMPILE;
2530 }
2531 else
2532 break; /* done! */
2533 }
2534
2535 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2536
2537
2538 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
[41738]2539 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2540 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
[41737]2541 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
[1]2542 )
2543 {
[9212]2544 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
[1]2545 if (addr == 0)
2546 {
[41739]2547 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
[1]2548 rc = VERR_PATCHING_REFUSED;
2549 break;
2550 }
2551
[13834]2552 Log(("Jump encountered target %RRv\n", addr));
[1]2553
2554 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
[41738]2555 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
[1]2556 {
2557 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2558 /* First we need to finish this linear code stream until the next exit point. */
[41732]2559 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
[13833]2560 if (RT_FAILURE(rc))
[1]2561 {
2562 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2563 break; //fatal error
2564 }
2565 }
2566
2567 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2568 {
2569 /* New code; let's recompile it. */
2570 Log(("patmRecompileCodeStream continue with jump\n"));
2571
2572 /*
2573 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2574 * this patch so we can continue our analysis
2575 *
2576 * We rely on CSAM to detect and resolve conflicts
2577 */
[44362]2578 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
[1]2579 if(pTargetPatch)
2580 {
[13834]2581 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
[1]2582 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2583 }
2584
[41737]2585 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
[30572]2586 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
[41737]2587 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
[1]2588
2589 if(pTargetPatch)
2590 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2591
[13833]2592 if (RT_FAILURE(rc))
[1]2593 {
2594 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2595 break; //done!
2596 }
2597 }
2598 /* Always return to caller here; we're done! */
2599 rc = VINF_SUCCESS;
2600 goto end;
2601 }
2602 else
[41738]2603 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
[1]2604 {
2605 rc = VINF_SUCCESS;
2606 goto end;
2607 }
[41732]2608 pCurInstrGC += cbInstr;
[1]2609 }
2610end:
2611 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2612 return rc;
2613}
2614
2615
2616/**
2617 * Generate the jump from guest to patch code
2618 *
2619 * @returns VBox status code.
[58122]2620 * @param pVM The cross context VM structure.
[1]2621 * @param pPatch Patch record
[30572]2622 * @param pCacheRec Guest translation lookup cache record
[58126]2623 * @param fAddFixup Whether to add a fixup record.
[1]2624 */
[30572]2625static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
[1]2626{
2627 uint8_t temp[8];
2628 uint8_t *pPB;
2629 int rc;
2630
2631 Assert(pPatch->cbPatchJump <= sizeof(temp));
2632 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2633
[44362]2634 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
[30572]2635 Assert(pPB);
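 /* pPB is the host mapping of the guest instruction; &pPB[1] (&pPB[2] for a conditional jump) is where the 32-bit displacement lives and is what the FIXUP_REL_JMPTOPATCH relocations below refer to. */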
[1]2636
2637#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2638 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2639 {
2640 Assert(pPatch->pPatchJumpDestGC);
2641
2642 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2643 {
2644 // jmp [PatchCode]
2645 if (fAddFixup)
2646 {
[54714]2647 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2648 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
[1]2649 {
2650 Log(("Relocation failed for the jump in the guest code!!\n"));
2651 return VERR_PATCHING_REFUSED;
2652 }
2653 }
2654
2655 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2656 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2657 }
2658 else
2659 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2660 {
2661 // jmp [PatchCode]
2662 if (fAddFixup)
2663 {
[54714]2664 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2665 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
[1]2666 {
2667 Log(("Relocation failed for the jump in the guest code!!\n"));
2668 return VERR_PATCHING_REFUSED;
2669 }
2670 }
2671
2672 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2673 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2674 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2675 }
2676 else
2677 {
2678 Assert(0);
2679 return VERR_PATCHING_REFUSED;
2680 }
2681 }
2682 else
2683#endif
2684 {
2685 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2686
2687 // jmp [PatchCode]
2688 if (fAddFixup)
2689 {
[54714]2690 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32,
2691 PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
[1]2692 {
2693 Log(("Relocation failed for the jump in the guest code!!\n"));
2694 return VERR_PATCHING_REFUSED;
2695 }
2696 }
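 /* Assemble the 5-byte near jmp: opcode 0xE9 followed by a 32-bit displacement relative to the end of the jump (pPrivInstrGC + SIZEOF_NEARJUMP32). */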
2697 temp[0] = 0xE9; //jmp
[9228]2698 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
[1]2699 }
[18927]2700 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
[1]2701 AssertRC(rc);
2702
2703 if (rc == VINF_SUCCESS)
2704 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2705
2706 return rc;
2707}
2708
2709/**
2710 * Remove the jump from guest to patch code
2711 *
2712 * @returns VBox status code.
[58122]2713 * @param pVM The cross context VM structure.
[1]2714 * @param pPatch Patch record
2715 */
2716static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2717{
2718#ifdef DEBUG
2719 DISCPUSTATE cpu;
2720 char szOutput[256];
[41732]2721 uint32_t cbInstr, i = 0;
[1]2722 bool disret;
2723
[30572]2724 while (i < pPatch->cbPrivInstr)
[1]2725 {
[41671]2726 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
[41732]2727 &cpu, &cbInstr, szOutput, sizeof(szOutput));
[1]2728 if (disret == false)
2729 break;
2730
2731 Log(("Org patch jump: %s", szOutput));
[41732]2732 Assert(cbInstr);
2733 i += cbInstr;
[1]2734 }
2735#endif
2736
2737 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
[18927]2738 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
[1]2739#ifdef DEBUG
2740 if (rc == VINF_SUCCESS)
2741 {
[25777]2742 i = 0;
[41671]2743 while (i < pPatch->cbPrivInstr)
[1]2744 {
[41671]2745 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
[41732]2746 &cpu, &cbInstr, szOutput, sizeof(szOutput));
[1]2747 if (disret == false)
2748 break;
2749
2750 Log(("Org instr: %s", szOutput));
[41732]2751 Assert(cbInstr);
2752 i += cbInstr;
[1]2753 }
2754 }
2755#endif
2756 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2757 return rc;
2758}
2759
2760/**
2761 * Generate the call from guest to patch code
2762 *
2763 * @returns VBox status code.
[58122]2764 * @param pVM The cross context VM structure.
[1]2765 * @param pPatch Patch record
[58126]2766 * @param pTargetGC The target of the fixup (i.e. the patch code we're
2767 * calling into).
[30572]2768 * @param pCacheRec Guest translation cache record
[58126]2769 * @param fAddFixup Whether to add a fixup record.
[1]2770 */
[30572]2771static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
[1]2772{
2773 uint8_t temp[8];
2774 uint8_t *pPB;
2775 int rc;
2776
2777 Assert(pPatch->cbPatchJump <= sizeof(temp));
2778
[44362]2779 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
[30572]2780 Assert(pPB);
[1]2781
2782 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2783
2784 // jmp [PatchCode]
2785 if (fAddFixup)
2786 {
[54714]2787 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH,
2788 pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
[1]2789 {
2790 Log(("Relocation failed for the jump in the guest code!!\n"));
2791 return VERR_PATCHING_REFUSED;
2792 }
2793 }
2794
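 /* Reuse the original call/jmp opcode byte (0xE8/0xE9) and retarget its 32-bit displacement at the patch code; the displacement is relative to the end of the 5-byte instruction. */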
2795 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2796 temp[0] = pPatch->aPrivInstr[0];
2797 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2798
[18927]2799 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
[1]2800 AssertRC(rc);
2801
2802 return rc;
2803}
2804
2805
2806/**
2807 * Patch cli/sti pushf/popf instruction block at specified location
2808 *
2809 * @returns VBox status code.
[58122]2810 * @param pVM The cross context VM structure.
2811 * @param pInstrGC Guest context pointer to the privileged instruction
2812 * @param pInstrHC Host context pointer to the privileged instruction
2813 * @param uOpcode Instruction opcode
2814 * @param uOpSize Size of starting instruction
2815 * @param pPatchRec Patch record
2816 *
2817 * @note returns failure if patching is not allowed or possible
2818 *
2819 */
[44362]2820static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2821 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
[1]2822{
2823 PPATCHINFO pPatch = &pPatchRec->patch;
2824 int rc = VERR_PATCHING_REFUSED;
[62649]2825 uint32_t orgOffsetPatchMem = UINT32_MAX;
[9228]2826 RTRCPTR pInstrStart;
[36801]2827 bool fInserted;
[39078]2828 NOREF(pInstrHC); NOREF(uOpSize);
[1]2829
2830 /* Save original offset (in case of failures later on) */
2831 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2832 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2833
2834 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2835 switch (uOpcode)
2836 {
2837 case OP_MOV:
2838 break;
2839
2840 case OP_CLI:
2841 case OP_PUSHF:
2842 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
[31437]2843 /* Note: special precautions are taken when disabling and enabling such patches. */
[1]2844 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2845 break;
2846
2847 default:
2848 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2849 {
[44362]2850 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
[1]2851 return VERR_INVALID_PARAMETER;
2852 }
2853 }
2854
[3696]2855 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
[2919]2856 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2857
2858 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2859 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2860 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2861 )
[1]2862 {
2863 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
[30572]2864 Log(("Patch jump would cross page boundary -> refuse!!\n"));
[1]2865 rc = VERR_PATCHING_REFUSED;
2866 goto failure;
2867 }
2868
2869 pPatch->nrPatch2GuestRecs = 0;
2870 pInstrStart = pInstrGC;
2871
2872#ifdef PATM_ENABLE_CALL
2873 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2874#endif
2875
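 /* Reserve the patch block at the current free offset in patch memory; uCurPatchOffset tracks code generation relative to the start of this block. */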
2876 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2877 pPatch->uCurPatchOffset = 0;
2878
2879 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2880 {
2881 Assert(pPatch->flags & PATMFL_INTHANDLER);
2882
2883 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2884 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
[13833]2885 if (RT_FAILURE(rc))
[1]2886 goto failure;
2887 }
2888
2889 /***************************************************************************************************************************/
[31437]2890 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
[1]2891 /***************************************************************************************************************************/
2892#ifdef VBOX_WITH_STATISTICS
2893 if (!(pPatch->flags & PATMFL_SYSENTER))
2894 {
2895 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
[13833]2896 if (RT_FAILURE(rc))
[1]2897 goto failure;
2898 }
2899#endif
2900
[30572]2901 PATMP2GLOOKUPREC cacheRec;
2902 RT_ZERO(cacheRec);
2903 cacheRec.pPatch = pPatch;
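              /* The cache record carries the patch pointer plus any PGM page mapping lock the
                 guest code accesses below may leave behind; such a lock must always be released. */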
2904
2905 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2906 /* Free leftover lock if any. */
2907 if (cacheRec.Lock.pvMap)
2908 {
2909 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2910 cacheRec.Lock.pvMap = NULL;
2911 }
[1]2912 if (rc != VINF_SUCCESS)
2913 {
2914 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2915 goto failure;
2916 }
2917
2918 /* Calculated during analysis. */
2919 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2920 {
2921 /* Most likely cause: we encountered an illegal instruction very early on. */
2922 /** @todo could turn it into an int3 callable patch. */
[44362]2923 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
[1]2924 rc = VERR_PATCHING_REFUSED;
2925 goto failure;
2926 }
2927
2928 /* size of patch block */
2929 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2930
2931
2932 /* Update free pointer in patch memory. */
2933 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2934 /* Round to next 8 byte boundary. */
2935 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
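              /* E.g. RT_ALIGN_32(0x1233, 8) yields 0x1238; already aligned offsets stay unchanged. */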
2936
2937 /*
2938 * Insert into patch to guest lookup tree
2939 */
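              /* Keyed by the offset of the patch block inside patch memory, so a patch memory
                 address can be mapped back to its patch record (cf. the PatchTreeByPatchAddr
                 best-fit lookup in PATMR3InstallPatch). */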
[13834]2940 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
[1]2941 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
[36801]2942 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2943 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2944 if (!fInserted)
[1]2945 {
2946 rc = VERR_PATCHING_REFUSED;
2947 goto failure;
2948 }
2949
2950 /* Note that patmr3SetBranchTargets can install additional patches!! */
2951 rc = patmr3SetBranchTargets(pVM, pPatch);
2952 if (rc != VINF_SUCCESS)
2953 {
2954 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2955 goto failure;
2956 }
2957
2958#ifdef LOG_ENABLED
2959 Log(("Patch code ----------------------------------------------------------\n"));
[57411]2960 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmR3DisasmCallback, &cacheRec);
[30572]2961 /* Free leftover lock if any. */
2962 if (cacheRec.Lock.pvMap)
2963 {
2964 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2965 cacheRec.Lock.pvMap = NULL;
2966 }
[1]2967 Log(("Patch code ends -----------------------------------------------------\n"));
2968#endif
2969
2970 /* make a copy of the guest code bytes that will be overwritten */
2971 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2972
[18927]2973 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
[1]2974 AssertRC(rc);
2975
2976 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2977 {
[41658]2978 /*uint8_t bASMInt3 = 0xCC; - unused */
[1]2979
[44362]2980 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
[1]2981 /* Replace first opcode byte with 'int 3'. */
[23]2982 rc = patmActivateInt3Patch(pVM, pPatch);
[13833]2983 if (RT_FAILURE(rc))
[1]2984 goto failure;
2985
[2921]2986 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2987 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2988
[1]2989 pPatch->flags &= ~PATMFL_INSTR_HINT;
2990 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2991 }
[2921]2992 else
[2919]2993 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
[1]2994 {
[2919]2995 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
[1]2996 /* now insert a jump in the guest code */
[30572]2997 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
[1]2998 AssertRC(rc);
[13833]2999 if (RT_FAILURE(rc))
[1]3000 goto failure;
3001
3002 }
3003
[46150]3004 patmR3DbgAddPatch(pVM, pPatchRec);
3005
[41671]3006 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
[1]3007
3008 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3009 pPatch->pTempInfo->nrIllegalInstr = 0;
3010
[13834]3011 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
[1]3012
3013 pPatch->uState = PATCH_ENABLED;
3014 return VINF_SUCCESS;
3015
3016failure:
3017 if (pPatchRec->CoreOffset.Key)
[9228]3018 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
[1]3019
3020 patmEmptyTree(pVM, &pPatch->FixupTree);
3021 pPatch->nrFixups = 0;
3022
3023 patmEmptyTree(pVM, &pPatch->JumpTree);
3024 pPatch->nrJumpRecs = 0;
3025
3026 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3027 pPatch->pTempInfo->nrIllegalInstr = 0;
3028
3029 /* Turn this cli patch into a dummy. */
3030 pPatch->uState = PATCH_REFUSED;
3031 pPatch->pPatchBlockOffset = 0;
3032
3033 // Give back the patch memory we no longer need
3034 Assert(orgOffsetPatchMem != (uint32_t)~0);
3035 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3036
3037 return rc;
3038}
3039
3040/**
3041 * Patch IDT handler
3042 *
3043 * @returns VBox status code.
[58122]3044 * @param pVM The cross context VM structure.
 3045 * @param pInstrGC Guest context pointer to privileged instruction
3046 * @param uOpSize Size of starting instruction
3047 * @param pPatchRec Patch record
[30572]3048 * @param pCacheRec Cache record ptr
[1]3049 *
3050 * @note returns failure if patching is not allowed or possible
3051 *
3052 */
[30572]3053static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
[1]3054{
3055 PPATCHINFO pPatch = &pPatchRec->patch;
3056 bool disret;
3057 DISCPUSTATE cpuPush, cpuJmp;
[41732]3058 uint32_t cbInstr;
[9228]3059 RTRCPTR pCurInstrGC = pInstrGC;
[30572]3060 uint8_t *pCurInstrHC, *pInstrHC;
[62649]3061 uint32_t orgOffsetPatchMem = UINT32_MAX;
[1]3062
[44362]3063 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
[30572]3064 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3065
[1]3066 /*
3067 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3068 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
 3069 * condition here and only patch the common entrypoint once.
3070 */
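              /* Illustrative guest pattern (names and values made up; only the push+jmp shape matters):
               *     push 0x20          ; vector specific value
               *     jmp  CommonEntry   ; shared handler body, patched only once
               */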
[41732]3071 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
[1]3072 Assert(disret);
[41737]3073 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
[1]3074 {
[9228]3075 RTRCPTR pJmpInstrGC;
[1]3076 int rc;
[41732]3077 pCurInstrGC += cbInstr;
[1]3078
[41732]3079 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
[1]3080 if ( disret
[41737]3081 && cpuJmp.pCurInstr->uOpcode == OP_JMP
[1]3082 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3083 )
3084 {
[36801]3085 bool fInserted;
[9228]3086 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
[1]3087 if (pJmpPatch == 0)
3088 {
3089 /* Patch it first! */
3090 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3091 if (rc != VINF_SUCCESS)
3092 goto failure;
[9228]3093 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
[1]3094 Assert(pJmpPatch);
3095 }
3096 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3097 goto failure;
3098
3099 /* save original offset (in case of failures later on) */
3100 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3101
3102 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3103 pPatch->uCurPatchOffset = 0;
3104 pPatch->nrPatch2GuestRecs = 0;
3105
3106#ifdef VBOX_WITH_STATISTICS
3107 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
[13833]3108 if (RT_FAILURE(rc))
[1]3109 goto failure;
3110#endif
3111
3112 /* Install fake cli patch (to clear the virtual IF) */
3113 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
[13833]3114 if (RT_FAILURE(rc))
[1]3115 goto failure;
3116
3117 /* Add lookup record for patch to guest address translation (for the push) */
[44362]3118 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
[1]3119
3120 /* Duplicate push. */
3121 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
[13833]3122 if (RT_FAILURE(rc))
[1]3123 goto failure;
3124
3125 /* Generate jump to common entrypoint. */
3126 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
[13833]3127 if (RT_FAILURE(rc))
[1]3128 goto failure;
3129
3130 /* size of patch block */
3131 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3132
3133 /* Update free pointer in patch memory. */
3134 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3135 /* Round to next 8 byte boundary */
3136 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3137
3138 /* There's no jump from guest to patch code. */
3139 pPatch->cbPatchJump = 0;
3140
3141
3142#ifdef LOG_ENABLED
3143 Log(("Patch code ----------------------------------------------------------\n"));
[57411]3144 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmR3DisasmCallback, pCacheRec);
[1]3145 Log(("Patch code ends -----------------------------------------------------\n"));
3146#endif
[13834]3147 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
[1]3148
3149 /*
3150 * Insert into patch to guest lookup tree
3151 */
[13834]3152 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
[1]3153 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
[36801]3154 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3155 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
[46150]3156 patmR3DbgAddPatch(pVM, pPatchRec);
[1]3157
3158 pPatch->uState = PATCH_ENABLED;
3159
3160 return VINF_SUCCESS;
3161 }
3162 }
3163failure:
3164 /* Give back the patch memory we no longer need */
3165 if (orgOffsetPatchMem != (uint32_t)~0)
3166 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3167
[44362]3168 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
[1]3169}
3170
3171/**
3172 * Install a trampoline to call a guest trap handler directly
3173 *
3174 * @returns VBox status code.
[58122]3175 * @param pVM The cross context VM structure.
 3176 * @param pInstrGC Guest context pointer to privileged instruction
3177 * @param pPatchRec Patch record
[30572]3178 * @param pCacheRec Cache record ptr
[1]3179 *
3180 */
[30572]3181static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
[1]3182{
3183 PPATCHINFO pPatch = &pPatchRec->patch;
3184 int rc = VERR_PATCHING_REFUSED;
[62649]3185 uint32_t orgOffsetPatchMem = UINT32_MAX;
[36801]3186 bool fInserted;
[1]3187
3188 // save original offset (in case of failures later on)
3189 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3190
3191 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3192 pPatch->uCurPatchOffset = 0;
3193 pPatch->nrPatch2GuestRecs = 0;
3194
3195#ifdef VBOX_WITH_STATISTICS
3196 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
[13833]3197 if (RT_FAILURE(rc))
[1]3198 goto failure;
3199#endif
3200
3201 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
[13833]3202 if (RT_FAILURE(rc))
[1]3203 goto failure;
3204
3205 /* size of patch block */
3206 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3207
3208 /* Update free pointer in patch memory. */
3209 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3210 /* Round to next 8 byte boundary */
3211 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3212
3213 /* There's no jump from guest to patch code. */
3214 pPatch->cbPatchJump = 0;
3215
3216#ifdef LOG_ENABLED
3217 Log(("Patch code ----------------------------------------------------------\n"));
[57411]3218 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmR3DisasmCallback, pCacheRec);
[1]3219 Log(("Patch code ends -----------------------------------------------------\n"));
[62651]3220#else
3221 RT_NOREF_PV(pCacheRec);
[1]3222#endif
[41671]3223 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
[13834]3224 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
[1]3225
3226 /*
3227 * Insert into patch to guest lookup tree
3228 */
[13834]3229 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
[1]3230 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
[36801]3231 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3232 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
[46150]3233 patmR3DbgAddPatch(pVM, pPatchRec);
[1]3234
3235 pPatch->uState = PATCH_ENABLED;
3236 return VINF_SUCCESS;
3237
3238failure:
3239 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3240
3241 /* Turn this cli patch into a dummy. */
3242 pPatch->uState = PATCH_REFUSED;
3243 pPatch->pPatchBlockOffset = 0;
3244
3245 /* Give back the patch memory we no longer need */
3246 Assert(orgOffsetPatchMem != (uint32_t)~0);
3247 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3248
3249 return rc;
3250}
3251
3252
[6107]3253#ifdef LOG_ENABLED
[1]3254/**
3255 * Check if the instruction is patched as a common idt handler
3256 *
3257 * @returns true or false
[58122]3258 * @param pVM The cross context VM structure.
 3259 * @param pInstrGC Guest context pointer to the instruction
3260 *
3261 */
[9228]3262static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
[1]3263{
3264 PPATMPATCHREC pRec;
3265
[9228]3266 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
[1]3267 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3268 return true;
3269 return false;
3270}
 3271 #endif /* LOG_ENABLED */
3272
3273
3274/**
3275 * Duplicates a complete function
3276 *
3277 * @returns VBox status code.
[58122]3278 * @param pVM The cross context VM structure.
 3279 * @param pInstrGC Guest context pointer to privileged instruction
3280 * @param pPatchRec Patch record
[30572]3281 * @param pCacheRec Cache record ptr
[1]3282 *
3283 */
[30572]3284static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
[1]3285{
3286 PPATCHINFO pPatch = &pPatchRec->patch;
3287 int rc = VERR_PATCHING_REFUSED;
[62649]3288 uint32_t orgOffsetPatchMem = UINT32_MAX;
[36801]3289 bool fInserted;
[1]3290
[13834]3291 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
[1]3292 /* Save original offset (in case of failures later on). */
3293 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3294
3295 /* We will not go on indefinitely with call instruction handling. */
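              /* Duplicating a function may in turn install patches for the functions it calls
                 (see the patmr3SetBranchTargets note below), hence the recursion limit. */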
3296 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3297 {
3298 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3299 return VERR_PATCHING_REFUSED;
3300 }
3301
3302 pVM->patm.s.ulCallDepth++;
3303
3304#ifdef PATM_ENABLE_CALL
3305 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3306#endif
3307
3308 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3309
3310 pPatch->nrPatch2GuestRecs = 0;
3311 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3312 pPatch->uCurPatchOffset = 0;
3313
[31437]3314 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
[1]3315 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
[13833]3316 if (RT_FAILURE(rc))
[1]3317 goto failure;
3318
3319#ifdef VBOX_WITH_STATISTICS
3320 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
[13833]3321 if (RT_FAILURE(rc))
[1]3322 goto failure;
3323#endif
[30572]3324
3325 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
[1]3326 if (rc != VINF_SUCCESS)
3327 {
3328 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3329 goto failure;
3330 }
3331
3332 //size of patch block
3333 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3334
3335 //update free pointer in patch memory
3336 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3337 /* Round to next 8 byte boundary. */
3338 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3339
3340 pPatch->uState = PATCH_ENABLED;
3341
3342 /*
3343 * Insert into patch to guest lookup tree
3344 */
[13834]3345 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
[1]3346 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
[36801]3347 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3348 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3349 if (!fInserted)
[1]3350 {
3351 rc = VERR_PATCHING_REFUSED;
3352 goto failure;
3353 }
3354
3355 /* Note that patmr3SetBranchTargets can install additional patches!! */
3356 rc = patmr3SetBranchTargets(pVM, pPatch);
3357 if (rc != VINF_SUCCESS)
3358 {
3359 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3360 goto failure;
3361 }
3362
[46150]3363 patmR3DbgAddPatch(pVM, pPatchRec);
3364
[1]3365#ifdef LOG_ENABLED
3366 Log(("Patch code ----------------------------------------------------------\n"));
[57411]3367 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmR3DisasmCallback, pCacheRec);
[1]3368 Log(("Patch code ends -----------------------------------------------------\n"));
3369#endif
3370
[13834]3371 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
[1]3372
3373 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3374 pPatch->pTempInfo->nrIllegalInstr = 0;
3375
3376 pVM->patm.s.ulCallDepth--;
3377 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3378 return VINF_SUCCESS;
3379
3380failure:
3381 if (pPatchRec->CoreOffset.Key)
[9228]3382 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
[1]3383
3384 patmEmptyTree(pVM, &pPatch->FixupTree);
3385 pPatch->nrFixups = 0;
3386
3387 patmEmptyTree(pVM, &pPatch->JumpTree);
3388 pPatch->nrJumpRecs = 0;
3389
3390 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3391 pPatch->pTempInfo->nrIllegalInstr = 0;
3392
3393 /* Turn this cli patch into a dummy. */
3394 pPatch->uState = PATCH_REFUSED;
3395 pPatch->pPatchBlockOffset = 0;
3396
3397 // Give back the patch memory we no longer need
3398 Assert(orgOffsetPatchMem != (uint32_t)~0);
3399 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3400
3401 pVM->patm.s.ulCallDepth--;
[13834]3402 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
[1]3403 return rc;
3404}
3405
3406/**
3407 * Creates trampoline code to jump inside an existing patch
3408 *
3409 * @returns VBox status code.
[58122]3410 * @param pVM The cross context VM structure.
 3411 * @param pInstrGC Guest context pointer to privileged instruction
3412 * @param pPatchRec Patch record
3413 *
3414 */
[9228]3415static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
[1]3416{
3417 PPATCHINFO pPatch = &pPatchRec->patch;
[9228]3418 RTRCPTR pPage, pPatchTargetGC = 0;
[62649]3419 uint32_t orgOffsetPatchMem = UINT32_MAX;
[1]3420 int rc = VERR_PATCHING_REFUSED;
[36669]3421 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3422 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3423 bool fInserted = false;
[1]3424
[13834]3425 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
[1]3426 /* Save original offset (in case of failures later on). */
3427 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3428
3429 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3430 /** @todo we already checked this before */
3431 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3432
[9228]3433 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
[1]3434 if (pPatchPage)
3435 {
3436 uint32_t i;
3437
3438 for (i=0;i<pPatchPage->cCount;i++)
3439 {
[41897]3440 if (pPatchPage->papPatch[i])
[1]3441 {
[41897]3442 pPatchToJmp = pPatchPage->papPatch[i];
[1]3443
[36669]3444 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3445 && pPatchToJmp->uState == PATCH_ENABLED)
[1]3446 {
[36669]3447 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
[1]3448 if (pPatchTargetGC)
[2030]3449 {
3450 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
[36669]3451 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
[2030]3452 Assert(pPatchToGuestRec);
3453
3454 pPatchToGuestRec->fJumpTarget = true;
[36669]3455 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3456 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
[1]3457 break;
[2030]3458 }
[1]3459 }
3460 }
3461 }
3462 }
[36669]3463 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
[1]3464
[36669]3465 /*
3466 * Only record the trampoline patch if this is the first patch to the target
 3467 * or we have already recorded other trampoline patches for it.
 3468 * The goal is to refuse refreshing function duplicates if the guest
 3469 * modifies code after a saved state was loaded, because it is not possible
 3470 * to save the relation between trampoline and target without changing the
 3471 * saved state version.
3472 */
3473 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3474 || pPatchToJmp->pTrampolinePatchesHead)
3475 {
3476 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3477 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3478 if (!pTrampRec)
3479 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3480
3481 pTrampRec->pPatchTrampoline = pPatchRec;
3482 }
3483
[1]3484 pPatch->nrPatch2GuestRecs = 0;
3485 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3486 pPatch->uCurPatchOffset = 0;
3487
[31437]3488 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
[1]3489 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
[13833]3490 if (RT_FAILURE(rc))
[1]3491 goto failure;
3492
3493#ifdef VBOX_WITH_STATISTICS
3494 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
[13833]3495 if (RT_FAILURE(rc))
[1]3496 goto failure;
3497#endif
3498
3499 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
[13833]3500 if (RT_FAILURE(rc))
[1]3501 goto failure;
3502
3503 /*
3504 * Insert into patch to guest lookup tree
3505 */
[13834]3506 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
[1]3507 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
[36669]3508 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3509 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3510 if (!fInserted)
[1]3511 {
3512 rc = VERR_PATCHING_REFUSED;
3513 goto failure;
3514 }
[46150]3515 patmR3DbgAddPatch(pVM, pPatchRec);
[1]3516
3517 /* size of patch block */
3518 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3519
3520 /* Update free pointer in patch memory. */
3521 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3522 /* Round to next 8 byte boundary */
3523 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3524
3525 /* There's no jump from guest to patch code. */
3526 pPatch->cbPatchJump = 0;
3527
3528 /* Enable the patch. */
3529 pPatch->uState = PATCH_ENABLED;
3530 /* We allow this patch to be called as a function. */
3531 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
[36669]3532
3533 if (pTrampRec)
3534 {
3535 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3536 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3537 }
[1]3538 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3539 return VINF_SUCCESS;
3540
3541failure:
3542 if (pPatchRec->CoreOffset.Key)
[9228]3543 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
[1]3544
3545 patmEmptyTree(pVM, &pPatch->FixupTree);
3546 pPatch->nrFixups = 0;
3547
3548 patmEmptyTree(pVM, &pPatch->JumpTree);
3549 pPatch->nrJumpRecs = 0;
3550
3551 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3552 pPatch->pTempInfo->nrIllegalInstr = 0;
3553
3554 /* Turn this cli patch into a dummy. */
3555 pPatch->uState = PATCH_REFUSED;
3556 pPatch->pPatchBlockOffset = 0;
3557
3558 // Give back the patch memory we no longer need
3559 Assert(orgOffsetPatchMem != (uint32_t)~0);
3560 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3561
[36669]3562 if (pTrampRec)
3563 MMR3HeapFree(pTrampRec);
3564
[1]3565 return rc;
3566}
3567
3568
3569/**
3570 * Patch branch target function for call/jump at specified location.
 3571 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3572 *
3573 * @returns VBox status code.
[58122]3574 * @param pVM The cross context VM structure.
[41836]3575 * @param pCtx Pointer to the guest CPU context.
[1]3576 *
3577 */
[44362]3578VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
[1]3579{
[9228]3580 RTRCPTR pBranchTarget, pPage;
[1]3581 int rc;
[9228]3582 RTRCPTR pPatchTargetGC = 0;
[70948]3583 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PATM_HM_IPE);
[1]3584
3585 pBranchTarget = pCtx->edx;
[41727]3586 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
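              /* Register convention used by the patch code for this request (as used below): edx
                 holds the branch target, edi points at the branch lookup cache inside patch memory,
                 and eax receives the patch memory relative address of the duplicate (0 on failure). */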
[1]3587
3588 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3589 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3590
[9228]3591 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
[1]3592 if (pPatchPage)
3593 {
3594 uint32_t i;
3595
3596 for (i=0;i<pPatchPage->cCount;i++)
3597 {
[41897]3598 if (pPatchPage->papPatch[i])
[1]3599 {
[41897]3600 PPATCHINFO pPatch = pPatchPage->papPatch[i];
[1]3601
3602 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3603 && pPatch->uState == PATCH_ENABLED)
3604 {
3605 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3606 if (pPatchTargetGC)
3607 {
3608 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3609 break;
3610 }
3611 }
3612 }
3613 }
3614 }
3615
3616 if (pPatchTargetGC)
3617 {
[54764]3618 /* Create a trampoline that also sets PATM_ASMFIX_INTERRUPTFLAG. */
[1]3619 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3620 }
3621 else
3622 {
3623 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3624 }
3625
3626 if (rc == VINF_SUCCESS)
3627 {
3628 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3629 Assert(pPatchTargetGC);
3630 }
3631
3632 if (pPatchTargetGC)
3633 {
3634 pCtx->eax = pPatchTargetGC;
[9228]3635 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
[1]3636 }
3637 else
3638 {
3639 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3640 pCtx->eax = 0;
3641 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3642 }
3643 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
[44362]3644 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
[1]3645 AssertRC(rc);
3646
3647 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3648 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3649 return VINF_SUCCESS;
3650}
3651
3652/**
3653 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3654 *
3655 * @returns VBox status code.
[58122]3656 * @param pVM The cross context VM structure.
[1]3657 * @param pCpu Disassembly CPU structure ptr
 3658 * @param pInstrGC Guest context pointer to privileged instruction
[30572]3659 * @param pCacheRec Cache record ptr
[1]3660 *
3661 */
[30572]3662static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
[1]3663{
[30572]3664 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
[1]3665 int rc = VERR_PATCHING_REFUSED;
3666 DISCPUSTATE cpu;
[9228]3667 RTRCPTR pTargetGC;
[1]3668 PPATMPATCHREC pPatchFunction;
[41732]3669 uint32_t cbInstr;
[1]3670 bool disret;
3671
3672 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
[41737]3673 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
[1]3674
[41737]3675 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
[1]3676 {
3677 rc = VERR_PATCHING_REFUSED;
3678 goto failure;
3679 }
3680
3681 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3682 if (pTargetGC == 0)
3683 {
[41739]3684 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
[1]3685 rc = VERR_PATCHING_REFUSED;
3686 goto failure;
3687 }
3688
[9228]3689 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
[1]3690 if (pPatchFunction == NULL)
3691 {
3692 for(;;)
3693 {
3694 /* It could be an indirect call (call -> jmp dest).
3695 * Note that it's dangerous to assume the jump will never change...
3696 */
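                    /* Illustrative (import thunk style; names made up):
                     *     call Thunk
                     *   Thunk:
                     *     jmp  RealFunction
                     * We chase the jmp so the call can be redirected to the duplicate of RealFunction. */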
3697 uint8_t *pTmpInstrHC;
3698
[44362]3699 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
[1]3700 Assert(pTmpInstrHC);
3701 if (pTmpInstrHC == 0)
3702 break;
3703
[41732]3704 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
[41737]3705 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
[1]3706 break;
3707
3708 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3709 if (pTargetGC == 0)
3710 {
3711 break;
3712 }
3713
[9228]3714 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
[1]3715 break;
3716 }
3717 if (pPatchFunction == 0)
3718 {
[13834]3719 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
[1]3720 rc = VERR_PATCHING_REFUSED;
3721 goto failure;
3722 }
3723 }
3724
3725 // make a copy of the guest code bytes that will be overwritten
3726 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3727
[18927]3728 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
[1]3729 AssertRC(rc);
3730
3731 /* Now replace the original call in the guest code */
[30572]3732 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
[1]3733 AssertRC(rc);
[13833]3734 if (RT_FAILURE(rc))
[1]3735 goto failure;
3736
3737 /* Lowest and highest address for write monitoring. */
3738 pPatch->pInstrGCLowest = pInstrGC;
[41732]3739 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
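              /* These bounds delimit the guest code this patch depends on; write monitoring uses
                 them to detect guest modifications of that range. */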
[41671]3740 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
[1]3741
[13834]3742 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
[1]3743
3744 pPatch->uState = PATCH_ENABLED;
3745 return VINF_SUCCESS;
3746
3747failure:
3748 /* Turn this patch into a dummy. */
3749 pPatch->uState = PATCH_REFUSED;
3750
3751 return rc;
3752}
3753
3754/**
3755 * Replace the address in an MMIO instruction with the cached version.
3756 *
3757 * @returns VBox status code.
[58122]3758 * @param pVM The cross context VM structure.
 3759 * @param pInstrGC Guest context pointer to privileged instruction
3760 * @param pCpu Disassembly CPU structure ptr
[30572]3761 * @param pCacheRec Cache record ptr
[1]3762 *
3763 * @note returns failure if patching is not allowed or possible
3764 *
3765 */
[30572]3766static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
[1]3767{
[30572]3768 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3769 uint8_t *pPB;
3770 int rc = VERR_PATCHING_REFUSED;
[1]3771
3772 Assert(pVM->patm.s.mmio.pCachedData);
3773 if (!pVM->patm.s.mmio.pCachedData)
3774 goto failure;
3775
[41739]3776 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
[1]3777 goto failure;
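              /* The 32-bit displacement occupies the last 4 bytes of the instruction; that is what
                 gets relocated and rewritten below. Illustrative encoding only: mov eax, [0FEE000F0h]
                 is A1 F0 00 E0 FE, so the trailing F0 00 E0 FE would be replaced by the cached address. */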
3778
[44362]3779 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
[30572]3780 if (pPB == 0)
3781 goto failure;
[1]3782
3783 /* Add relocation record for cached data access. */
[46159]3784 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC,
3785 pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
[1]3786 {
3787 Log(("Relocation failed for cached mmio address!!\n"));
3788 return VERR_PATCHING_REFUSED;
3789 }
[41671]3790 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
[1]3791
3792 /* Save original instruction. */
[18927]3793 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
[1]3794 AssertRC(rc);
3795
3796 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3797
3798 /* Replace address with that of the cached item. */
[46159]3799 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
3800 &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
[1]3801 AssertRC(rc);
[13833]3802 if (RT_FAILURE(rc))
[1]3803 {
3804 goto failure;
3805 }
3806
[41671]3807 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
[1]3808 pVM->patm.s.mmio.pCachedData = 0;
3809 pVM->patm.s.mmio.GCPhys = 0;
3810 pPatch->uState = PATCH_ENABLED;
3811 return VINF_SUCCESS;
3812
3813failure:
3814 /* Turn this patch into a dummy. */
3815 pPatch->uState = PATCH_REFUSED;
3816
3817 return rc;
3818}
3819
3820
3821/**
3822 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3823 *
3824 * @returns VBox status code.
[58122]3825 * @param pVM The cross context VM structure.
 3826 * @param pInstrGC Guest context pointer to privileged instruction
3827 * @param pPatch Patch record
3828 *
3829 * @note returns failure if patching is not allowed or possible
3830 *
3831 */
[9228]3832static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
[1]3833{
3834 DISCPUSTATE cpu;
[41732]3835 uint32_t cbInstr;
[1]3836 bool disret;
3837 uint8_t *pInstrHC;
3838
3839 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3840
3841 /* Convert GC to HC address. */
3842 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3843 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3844
3845 /* Disassemble mmio instruction. */
[41671]3846 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
[41732]3847 &cpu, &cbInstr);
[1]3848 if (disret == false)
3849 {
3850 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3851 return VERR_PATCHING_REFUSED;
3852 }
3853
[41732]3854 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3855 if (cbInstr > MAX_INSTR_SIZE)
[1]3856 return VERR_PATCHING_REFUSED;
[41739]3857 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
[1]3858 return VERR_PATCHING_REFUSED;
3859
3860 /* Add relocation record for cached data access. */
[41732]3861 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
[1]3862 {
3863 Log(("Relocation failed for cached mmio address!!\n"));
3864 return VERR_PATCHING_REFUSED;
3865 }
3866 /* Replace address with that of the cached item. */
[41732]3867 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
[1]3868
3869 /* Lowest and highest address for write monitoring. */
3870 pPatch->pInstrGCLowest = pInstrGC;
[41732]3871 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
[1]3872
[41671]3873 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
[1]3874 pVM->patm.s.mmio.pCachedData = 0;
3875 pVM->patm.s.mmio.GCPhys = 0;
3876 return VINF_SUCCESS;
3877}
3878
3879/**
[23]3880 * Activates an int3 patch
[1]3881 *
3882 * @returns VBox status code.
[58122]3883 * @param pVM The cross context VM structure.
[1]3884 * @param pPatch Patch record
3885 */
3886static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3887{
[41658]3888 uint8_t bASMInt3 = 0xCC;
[1]3889 int rc;
3890
3891 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3892 Assert(pPatch->uState != PATCH_ENABLED);
3893
3894 /* Replace first opcode byte with 'int 3'. */
[41658]3895 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
[1]3896 AssertRC(rc);
3897
[41658]3898 pPatch->cbPatchJump = sizeof(bASMInt3);
[1]3899
3900 return rc;
3901}
3902
3903/**
[23]3904 * Deactivates an int3 patch
[1]3905 *
3906 * @returns VBox status code.
[58122]3907 * @param pVM The cross context VM structure.
[1]3908 * @param pPatch Patch record
3909 */
3910static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3911{
[62649]3912 uint8_t cbASMInt3 = 1;
[1]3913 int rc;
3914
3915 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3916 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3917
3918 /* Restore first opcode byte. */
[62649]3919 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, cbASMInt3);
[1]3920 AssertRC(rc);
3921 return rc;
3922}
3923
3924/**
[41658]3925 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3926 * in the raw-mode context.
[1]3927 *
3928 * @returns VBox status code.
[58122]3929 * @param pVM The cross context VM structure.
 3930 * @param pInstrGC Guest context pointer to privileged instruction
 3931 * @param pInstrHC Host context pointer to privileged instruction
3932 * @param pCpu Disassembly CPU structure ptr
3933 * @param pPatch Patch record
3934 *
3935 * @note returns failure if patching is not allowed or possible
3936 *
3937 */
[44362]3938int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
[1]3939{
[62649]3940 uint8_t cbASMInt3 = 1;
[1]3941 int rc;
[62651]3942 RT_NOREF_PV(pInstrHC);
[1]3943
[31437]3944 /* Note: Do not use patch memory here! It might called during patch installation too. */
[44362]3945 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
[1]3946
3947 /* Save the original instruction. */
[18927]3948 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
[1]3949 AssertRC(rc);
[62649]3950 pPatch->cbPatchJump = cbASMInt3; /* bit of a misnomer in this case; size of replacement instruction. */
[1]3951
3952 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3953
3954 /* Replace first opcode byte with 'int 3'. */
3955 rc = patmActivateInt3Patch(pVM, pPatch);
[13833]3956 if (RT_FAILURE(rc))
[1]3957 goto failure;
3958
3959 /* Lowest and highest address for write monitoring. */
3960 pPatch->pInstrGCLowest = pInstrGC;
[41732]3961 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
[1]3962
3963 pPatch->uState = PATCH_ENABLED;
3964 return VINF_SUCCESS;
3965
3966failure:
3967 /* Turn this patch into a dummy. */
3968 return VERR_PATCHING_REFUSED;
3969}
3970
3971#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3972/**
3973 * Patch a jump instruction at specified location
3974 *
3975 * @returns VBox status code.
[58122]3976 * @param pVM The cross context VM structure.
 3977 * @param pInstrGC Guest context pointer to privileged instruction
 3978 * @param pInstrHC Host context pointer to privileged instruction
3979 * @param pCpu Disassembly CPU structure ptr
3980 * @param pPatchRec Patch record
3981 *
3982 * @note returns failure if patching is not allowed or possible
3983 *
3984 */
[9228]3985int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
[1]3986{
3987 PPATCHINFO pPatch = &pPatchRec->patch;
3988 int rc = VERR_PATCHING_REFUSED;
3989
3990 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3991 pPatch->uCurPatchOffset = 0;
3992 pPatch->cbPatchBlockSize = 0;
3993 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3994
3995 /*
3996 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
 3997 * make sure this never happens (unless a trap is triggered, intentionally or not).
3998 */
[41737]3999 switch (pCpu->pCurInstr->uOpcode)
[1]4000 {
4001 case OP_JO:
4002 case OP_JNO:
4003 case OP_JC:
4004 case OP_JNC:
4005 case OP_JE:
4006 case OP_JNE:
4007 case OP_JBE:
4008 case OP_JNBE:
4009 case OP_JS:
4010 case OP_JNS:
4011 case OP_JP:
4012 case OP_JNP:
4013 case OP_JL:
4014 case OP_JNL:
4015 case OP_JLE:
4016 case OP_JNLE:
4017 case OP_JMP:
4018 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
[41739]4019 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
4020 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
[1]4021 goto failure;
4022
[41732]4023 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4024 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
[1]4025 goto failure;
4026
[41732]4027 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
[1]4028 {
4029 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4030 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4031 rc = VERR_PATCHING_REFUSED;
4032 goto failure;
4033 }
4034
4035 break;
4036
4037 default:
4038 goto failure;
4039 }
4040
4041 // make a copy of the guest code bytes that will be overwritten
[41732]4042 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4043 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4044 pPatch->cbPatchJump = pCpu->cbInstr;
[1]4045
[18927]4046 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
[1]4047 AssertRC(rc);
4048
4049 /* Now insert a jump in the guest code. */
4050 /*
4051 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4052 * references the target instruction in the conflict patch.
4053 */
[44362]4054 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
[1]4055
[44362]4056 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
[1]4057 pPatch->pPatchJumpDestGC = pJmpDest;
4058
[30572]4059 PATMP2GLOOKUPREC cacheRec;
4060 RT_ZERO(cacheRec);
4061 cacheRec.pPatch = pPatch;
[31064]4062
[30572]4063 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4064 /* Free leftover lock if any. */
4065 if (cacheRec.Lock.pvMap)
4066 {
4067 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4068 cacheRec.Lock.pvMap = NULL;
4069 }
[1]4070 AssertRC(rc);
[13833]4071 if (RT_FAILURE(rc))
[1]4072 goto failure;
4073
4074 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4075
[41671]4076 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
[13834]4077 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
[1]4078
4079 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4080
4081 /* Lowest and highest address for write monitoring. */
4082 pPatch->pInstrGCLowest = pInstrGC;
4083 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4084
4085 pPatch->uState = PATCH_ENABLED;
4086 return VINF_SUCCESS;
4087
4088failure:
4089 /* Turn this cli patch into a dummy. */
4090 pPatch->uState = PATCH_REFUSED;
4091
4092 return rc;
4093}
4094#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4095
4096
4097/**
4098 * Gives hint to PATM about supervisor guest instructions
4099 *
4100 * @returns VBox status code.
[58122]4101 * @param pVM The cross context VM structure.
[58126]4102 * @param pInstrGC Guest context pointer to privileged instruction
[1]4103 * @param flags Patch flags
4104 */
[44362]4105VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
[1]4106{
4107 Assert(pInstrGC);
[62651]4108 Assert(flags == PATMFL_CODE32); RT_NOREF_PV(flags);
[1]4109
[13834]4110 Log(("PATMR3AddHint %RRv\n", pInstrGC));
[1]4111 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4112}
4113
4114/**
4115 * Patch privileged instruction at specified location
4116 *
4117 * @returns VBox status code.
[58122]4118 * @param pVM The cross context VM structure.
[58126]4119 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat
4120 * address)
[1]4121 * @param flags Patch flags
4122 *
4123 * @note returns failure if patching is not allowed or possible
4124 */
[44362]4125VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
[1]4126{
4127 DISCPUSTATE cpu;
[4776]4128 R3PTRTYPE(uint8_t *) pInstrHC;
[41732]4129 uint32_t cbInstr;
[1]4130 PPATMPATCHREC pPatchRec;
[1918]4131 PCPUMCTX pCtx = 0;
[1]4132 bool disret;
4133 int rc;
[18927]4134 PVMCPU pVCpu = VMMGetCpu0(pVM);
[42781]4135 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
[1]4136
[70948]4137 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PATM_HM_IPE);
[45620]4138
[31064]4139 if ( !pVM
4140 || pInstrGC == 0
[30572]4141 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
[1]4142 {
4143 AssertFailed();
4144 return VERR_INVALID_PARAMETER;
4145 }
4146
4147 if (PATMIsEnabled(pVM) == false)
4148 return VERR_PATCHING_REFUSED;
4149
4150 /* Test for patch conflict only with patches that actually change guest code. */
4151 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4152 {
[44362]4153 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
[13834]4154 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
[1]4155 if (pConflictPatch != 0)
4156 return VERR_PATCHING_REFUSED;
4157 }
4158
4159 if (!(flags & PATMFL_CODE32))
4160 {
4161 /** @todo Only 32 bits code right now */
4162 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4163 return VERR_NOT_IMPLEMENTED;
4164 }
4165
4166 /* We ran out of patch memory; don't bother anymore. */
4167 if (pVM->patm.s.fOutOfMemory == true)
4168 return VERR_PATCHING_REFUSED;
4169
[45984]4170#if 1 /* DONT COMMIT ENABLED! */
[42781]4171 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on, */
4172 if ( 0
4173 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4174 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4175 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4176 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4177 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4178 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4179 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4180 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4181 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4182 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4183 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4184 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4185 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4186 || pInstrGC == 0x80014447 /* KfLowerIrql */
4187 || 0)
4188 {
4189 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4190 return VERR_PATCHING_REFUSED;
4191 }
4192#endif
4193
[1918]4194 /* Make sure the code selector is wide open; otherwise refuse. */
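              /* 'Wide open' here means a flat code segment (CS base 0), so the flat address
                 SELMToFlat returns equals pInstrGC itself; otherwise the patch is refused. */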
[18927]4195 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
[41939]4196 if (CPUMGetGuestCPL(pVCpu) == 0)
[1921]4197 {
[41727]4198 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
[1926]4199 if (pInstrGCFlat != pInstrGC)
4200 {
[41906]4201 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
[1926]4202 return VERR_PATCHING_REFUSED;
4203 }
[1921]4204 }
[1086]4205
[31437]4206 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3)) */
[1]4207 if (!(flags & PATMFL_GUEST_SPECIFIC))
4208 {
4209 /* New code. Make sure CSAM has a go at it first. */
[1086]4210 CSAMR3CheckCode(pVM, pInstrGC);
[1]4211 }
4212
[31437]4213 /* Note: obsolete */
[1]4214 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4215 && (flags & PATMFL_MMIO_ACCESS))
4216 {
[9228]4217 RTRCUINTPTR offset;
[9220]4218 void *pvPatchCoreOffset;
[1]4219
4220 /* Find the patch record. */
4221 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
[9228]4222 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
[1]4223 if (pvPatchCoreOffset == NULL)
4224 {
[13834]4225 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
[1]4226 return VERR_PATCH_NOT_FOUND; //fatal error
4227 }
4228 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4229
4230 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4231 }
4232
4233 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4234
[9228]4235 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
[1]4236 if (pPatchRec)
4237 {
4238 Assert(!(flags & PATMFL_TRAMPOLINE));
4239
4240 /* Hints about existing patches are ignored. */
4241 if (flags & PATMFL_INSTR_HINT)
4242 return VERR_PATCHING_REFUSED;
4243
4244 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4245 {
[13834]4246 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
[1]4247 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4248 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4249 }
4250
4251 if (pPatchRec->patch.uState == PATCH_DISABLED)
4252 {
4253 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4254 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4255 {
[13834]4256 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
[1]4257 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4258 }
4259 else
[13834]4260 Log(("Enabling patch %RRv again\n", pInstrGC));
[1]4261
4262 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4263 rc = PATMR3EnablePatch(pVM, pInstrGC);
[13833]4264 if (RT_SUCCESS(rc))
[1]4265 return VWRN_PATCH_ENABLED;
4266
4267 return rc;
4268 }
4269 if ( pPatchRec->patch.uState == PATCH_ENABLED
4270 || pPatchRec->patch.uState == PATCH_DIRTY)
4271 {
4272 /*
4273 * The patch might have been overwritten.
4274 */
4275 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4276 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4277 {
4278 /* Patch must have been overwritten; remove it and pretend nothing happened. */
[13834]4279 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
[1]4280 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4281 {
4282 if (flags & PATMFL_IDTHANDLER)
4283 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4284
4285 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4286 }
4287 }
[12855]4288 rc = PATMR3RemovePatch(pVM, pInstrGC);
[13833]4289 if (RT_FAILURE(rc))
[12855]4290 return VERR_PATCHING_REFUSED;
[1]4291 }
4292 else
4293 {
[13834]4294 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
[1]4295 /* already tried it once! */
4296 return VERR_PATCHING_REFUSED;
4297 }
4298 }
4299
4300 RTGCPHYS GCPhys;
[18988]4301 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
[1]4302 if (rc != VINF_SUCCESS)
4303 {
[13834]4304 Log(("PGMGstGetPage failed with %Rrc\n", rc));
[1]4305 return rc;
4306 }
4307 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
[23]4308 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
[1]4309 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4310 {
4311 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4312 return VERR_PATCHING_REFUSED;
4313 }
4314
[30572]4315 /* Initialize cache record for guest address translations. */
[36801]4316 bool fInserted;
[30572]4317 PATMP2GLOOKUPREC cacheRec;
4318 RT_ZERO(cacheRec);
4319
[44362]4320 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
[30572]4321 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
[31064]4322
[30572]4323 /* Allocate patch record. */
4324 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4325 if (RT_FAILURE(rc))
4326 {
4327 Log(("Out of memory!!!!\n"));
4328 return VERR_NO_MEMORY;
4329 }
4330 pPatchRec->Core.Key = pInstrGC;
4331 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4332 /* Insert patch record into the lookup tree. */
[36801]4333 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4334 Assert(fInserted);
[30572]4335
[1]4336 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4337 pPatchRec->patch.flags = flags;
[41675]4338 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
[36669]4339 pPatchRec->patch.pTrampolinePatchesHead = NULL;
[1]4340
4341 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4342 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4343
4344 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4345 {
4346 /*
4347 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4348 */
[9228]4349 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
[1]4350 if (pPatchNear)
4351 {
4352 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4353 {
[33595]4354 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
[1]4355
4356 pPatchRec->patch.uState = PATCH_UNUSABLE;
4357 /*
4358 * Leave the new patch active as it's marked unusable; to prevent us from checking it over and over again
4359 */
4360 return VERR_PATCHING_REFUSED;
4361 }
4362 }
4363 }
4364
4365 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4366 if (pPatchRec->patch.pTempInfo == 0)
4367 {
4368 Log(("Out of memory!!!!\n"));
4369 return VERR_NO_MEMORY;
4370 }
4371
[41732]4372 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
[1]4373 if (disret == false)
4374 {
4375 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4376 return VERR_PATCHING_REFUSED;
4377 }
4378
[41732]4379 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4380 if (cbInstr > MAX_INSTR_SIZE)
[1]4381 return VERR_PATCHING_REFUSED;
4382
[41732]4383 pPatchRec->patch.cbPrivInstr = cbInstr;
[41737]4384 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
[1]4385
4386 /* Restricted hinting for now. */
[41737]4387 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
[1]4388
[30572]4389 /* Initialize cache record patch pointer. */
4390 cacheRec.pPatch = &pPatchRec->patch;
4391
[1]4392 /* Allocate statistics slot */
4393 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4394 {
4395 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4396 }
4397 else
4398 {
4399 Log(("WARNING: Patch index wrap around!!\n"));
4400 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4401 }
4402
4403 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4404 {
[30572]4405 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
[1]4406 }
4407 else
4408 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4409 {
[30572]4410 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
[1]4411 }
4412 else
4413 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4414 {
4415 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4416 }
4417 else
4418 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4419 {
[30572]4420 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
[1]4421 }
4422 else
4423 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4424 {
[44362]4425 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
[1]4426 }
4427 else
4428 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4429 {
[30572]4430 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
[1]4431 }
4432 else
4433 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4434 {
4435 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4436 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4437
[41732]4438 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
[1]4439#ifdef VBOX_WITH_STATISTICS
4440 if ( rc == VINF_SUCCESS
4441 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4442 {
4443 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4444 }
4445#endif
4446 }
4447 else
4448 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4449 {
[41737]4450 switch (cpu.pCurInstr->uOpcode)
[1]4451 {
4452 case OP_SYSENTER:
[208]4453 case OP_PUSH:
[44362]4454 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
[1]4455 if (rc == VINF_SUCCESS)
4456 {
[13834]4458                     Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
[1]4459 return rc;
4460 }
4461 break;
4462
4463 default:
4464 rc = VERR_NOT_IMPLEMENTED;
4465 break;
4466 }
4467 }
4468 else
4469 {
[41737]4470 switch (cpu.pCurInstr->uOpcode)
[1]4471 {
4472 case OP_SYSENTER:
[44362]4473 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
[1]4474 if (rc == VINF_SUCCESS)
4475 {
[13834]4476 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
[1]4477 return VINF_SUCCESS;
4478 }
4479 break;
4480
4481#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4482 case OP_JO:
4483 case OP_JNO:
4484 case OP_JC:
4485 case OP_JNC:
4486 case OP_JE:
4487 case OP_JNE:
4488 case OP_JBE:
4489 case OP_JNBE:
4490 case OP_JS:
4491 case OP_JNS:
4492 case OP_JP:
4493 case OP_JNP:
4494 case OP_JL:
4495 case OP_JNL:
4496 case OP_JLE:
4497 case OP_JNLE:
4498 case OP_JECXZ:
4499 case OP_LOOP:
4500 case OP_LOOPNE:
4501 case OP_LOOPE:
4502 case OP_JMP:
4503 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4504 {
4505 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4506 break;
4507 }
4508 return VERR_NOT_IMPLEMENTED;
4509#endif
4510
4511 case OP_PUSHF:
4512 case OP_CLI:
[13834]4513 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
[44362]4514 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
[1]4515 break;
4516
[45276]4517#ifndef VBOX_WITH_SAFE_STR
[1]4518 case OP_STR:
[45276]4519#endif
[1]4520 case OP_SGDT:
4521 case OP_SLDT:
4522 case OP_SIDT:
4523 case OP_CPUID:
4524 case OP_LSL:
4525 case OP_LAR:
4526 case OP_SMSW:
4527 case OP_VERW:
4528 case OP_VERR:
4529 case OP_IRET:
[45276]4530#ifdef VBOX_WITH_RAW_RING1
4531 case OP_MOV:
4532#endif
[44362]4533 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
[1]4534 break;
4535
4536 default:
4537 return VERR_NOT_IMPLEMENTED;
4538 }
4539 }
4540
4541 if (rc != VINF_SUCCESS)
4542 {
4543 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4544 {
[93]4545 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
[1]4546 pPatchRec->patch.nrPatch2GuestRecs = 0;
4547 }
4548 pVM->patm.s.uCurrentPatchIdx--;
4549 }
4550 else
4551 {
4552 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4553 AssertRCReturn(rc, rc);
4554
4555 /* Keep track upper and lower boundaries of patched instructions */
4556 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4557 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4558 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4559 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4560
[13834]4561 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4562 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
[1]4563
4564 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4565 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4566
4567 rc = VINF_SUCCESS;
4568
 4569         /* Patch hints are not enabled by default; only when they are actually encountered. */
4570 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4571 {
4572 rc = PATMR3DisablePatch(pVM, pInstrGC);
4573 AssertRCReturn(rc, rc);
4574 }
4575
4576#ifdef VBOX_WITH_STATISTICS
4577 /* Register statistics counter */
4578 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4579 {
4580 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
[13834]4581 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
[1]4582#ifndef DEBUG_sandervl
4583 /* Full breakdown for the GUI. */
4584 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
[54746]4585 "/PATM/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4586 STAMR3RegisterF(pVM, &pPatchRec->patch.pPatchBlockOffset,STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/offPatchBlock", pPatchRec->patch.pPrivInstrGC);
4587 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4588 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4589 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4590 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4591 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4592 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4593 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4594 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4595 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4596 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4597 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4598 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
[1]4599 /// @todo change the state to be a callback so we can get a state mnemonic instead.
[54746]4600 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
[1]4601#endif
4602 }
4603#endif
[46150]4604
4605 /* Add debug symbol. */
4606 patmR3DbgAddPatch(pVM, pPatchRec);
[1]4607 }
[30572]4608 /* Free leftover lock if any. */
4609 if (cacheRec.Lock.pvMap)
4610 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
[1]4611 return rc;
4612}
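
/*
 * Illustrative sketch, not part of the build: how a caller could request a
 * patch for a privileged 32-bit guest instruction via PATMR3InstallPatch and
 * cope with refusal. The helper name and the bare PATMFL_CODE32 flag choice
 * are examples only, not how CSAM actually drives this.
 */
#if 0
static int patmExampleRequestPatch(PVM pVM, RTRCPTR pInstrGC)
{
    int rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32);
    if (RT_SUCCESS(rc))
        Log(("example: patch installed at %RRv\n", pInstrGC));
    else
        Log(("example: patching refused (%Rrc); instruction remains unpatched\n", rc));
    return rc;
}
#endif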
4613
4614/**
4615 * Query instruction size
4616 *
 4617 * @returns Size of the instruction in bytes, or 0 if it could not be read or disassembled.
[58122]4618 * @param pVM The cross context VM structure.
[1]4619 * @param pPatch Patch record
4620 * @param pInstrGC Instruction address
4621 */
[9228]4622static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
[1]4623{
[30572]4624 uint8_t *pInstrHC;
4625 PGMPAGEMAPLOCK Lock;
[1]4626
[30572]4627 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
[1]4628 if (rc == VINF_SUCCESS)
4629 {
4630 DISCPUSTATE cpu;
4631 bool disret;
[41732]4632 uint32_t cbInstr;
[1]4633
[41732]4634 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
[30572]4635 PGMPhysReleasePageMappingLock(pVM, &Lock);
[1]4636 if (disret)
[41732]4637 return cbInstr;
[1]4638 }
4639 return 0;
4640}
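
/*
 * Illustrative sketch, not part of the build: patmGetInstrSize() returns 0 on
 * failure, so it can be used to step through guest instructions until a page
 * is not present or disassembly fails. The helper below is hypothetical.
 */
#if 0
static uint32_t patmExampleCountInstructions(PVM pVM, PPATCHINFO pPatch, RTRCPTR pStartGC, uint32_t cbRange)
{
    uint32_t cInstructions = 0;
    RTRCPTR  pCurInstrGC   = pStartGC;
    while ((RTRCUINTPTR)pCurInstrGC < (RTRCUINTPTR)pStartGC + cbRange)
    {
        uint32_t cbInstr = patmGetInstrSize(pVM, pPatch, pCurInstrGC);
        if (!cbInstr)   /* page not present or disassembly failed */
            break;
        pCurInstrGC += cbInstr;
        cInstructions++;
    }
    return cInstructions;
}
#endif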
4641
4642/**
4643 * Add patch to page record
4644 *
4645 * @returns VBox status code.
[58122]4646 * @param pVM The cross context VM structure.
[1]4647 * @param pPage Page address
4648 * @param pPatch Patch record
4649 */
[9228]4650int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
[1]4651{
4652 PPATMPATCHPAGE pPatchPage;
4653 int rc;
4654
[13834]4655 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
[1]4656
[9228]4657 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
[1]4658 if (pPatchPage)
4659 {
4660 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4661 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4662 {
4663 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
[41897]4664 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
[1]4665
4666 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
[41897]4667 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4668 (void **)&pPatchPage->papPatch);
[13833]4669 if (RT_FAILURE(rc))
[1]4670 {
4671 Log(("Out of memory!!!!\n"));
4672 return VERR_NO_MEMORY;
4673 }
[41897]4674 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4675 MMHyperFree(pVM, papPatchOld);
[1]4676 }
[41897]4677 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
[1]4678 pPatchPage->cCount++;
4679 }
4680 else
4681 {
[36801]4682 bool fInserted;
4683
[1]4684 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
[13833]4685 if (RT_FAILURE(rc))
[1]4686 {
4687 Log(("Out of memory!!!!\n"));
4688 return VERR_NO_MEMORY;
4689 }
4690 pPatchPage->Core.Key = pPage;
4691 pPatchPage->cCount = 1;
4692 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4693
[41897]4694 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4695 (void **)&pPatchPage->papPatch);
[13833]4696 if (RT_FAILURE(rc))
[1]4697 {
4698 Log(("Out of memory!!!!\n"));
4699 MMHyperFree(pVM, pPatchPage);
4700 return VERR_NO_MEMORY;
4701 }
[41897]4702 pPatchPage->papPatch[0] = pPatch;
[1]4703
[36801]4704 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4705 Assert(fInserted);
[1]4706 pVM->patm.s.cPageRecords++;
4707
4708 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4709 }
4710 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4711
4712 /* Get the closest guest instruction (from below) */
[9228]4713 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
[1]4714 Assert(pGuestToPatchRec);
4715 if (pGuestToPatchRec)
4716 {
[13834]4717 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
[23]4718 if ( pPatchPage->pLowestAddrGC == 0
[9228]4719 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
[1]4720 {
[9228]4721 RTRCUINTPTR offset;
[1]4722
[9228]4723 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
[1]4724
4725 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
[41897]4726 /* If we're too close to the page boundary, then make sure an
4727 instruction from the previous page doesn't cross the
4728 boundary itself. */
[1]4729 if (offset && offset < MAX_INSTR_SIZE)
4730 {
4731 /* Get the closest guest instruction (from above) */
[9228]4732 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
[23]4733
[1]4734 if (pGuestToPatchRec)
4735 {
[9228]4736 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4737 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
[5609]4738 {
[1]4739 pPatchPage->pLowestAddrGC = pPage;
[13834]4740 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
[5609]4741 }
[1]4742 }
4743 }
4744 }
4745 }
4746
4747 /* Get the closest guest instruction (from above) */
[9228]4748 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
[1]4749 Assert(pGuestToPatchRec);
4750 if (pGuestToPatchRec)
4751 {
[39056]4752 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
[23]4753 if ( pPatchPage->pHighestAddrGC == 0
[9228]4754 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
[1]4755 {
[9228]4756 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
[1]4757 /* Increase by instruction size. */
4758 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
[1833]4759//// Assert(size);
[1]4760 pPatchPage->pHighestAddrGC += size;
[13834]4761 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
[1]4762 }
4763 }
4764
4765 return VINF_SUCCESS;
4766}
4767
4768/**
4769 * Remove patch from page record
4770 *
4771 * @returns VBox status code.
[58122]4772 * @param pVM The cross context VM structure.
[1]4773 * @param pPage Page address
4774 * @param pPatch Patch record
4775 */
[9228]4776int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
[1]4777{
4778 PPATMPATCHPAGE pPatchPage;
4779 int rc;
4780
[9228]4781 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
[1]4782 Assert(pPatchPage);
4783
4784 if (!pPatchPage)
4785 return VERR_INVALID_PARAMETER;
4786
4787 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4788
[13834]4789 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
[1]4790 if (pPatchPage->cCount > 1)
4791 {
4792 uint32_t i;
4793
4794 /* Used by multiple patches */
[41897]4795 for (i = 0; i < pPatchPage->cCount; i++)
[1]4796 {
[41897]4797 if (pPatchPage->papPatch[i] == pPatch)
[1]4798 {
[41897]4799 /* close the gap between the remaining pointers. */
[41898]4800 uint32_t cNew = --pPatchPage->cCount;
4801 if (i < cNew)
4802 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4803 pPatchPage->papPatch[cNew] = NULL;
[41897]4804 return VINF_SUCCESS;
[1]4805 }
4806 }
[41897]4807 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
[1]4808 }
4809 else
4810 {
4811 PPATMPATCHPAGE pPatchNode;
4812
[13834]4813 Log(("patmRemovePatchFromPage %RRv\n", pPage));
[1]4814
4815 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
[9228]4816 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
[1]4817 Assert(pPatchNode && pPatchNode == pPatchPage);
4818
[41897]4819 Assert(pPatchPage->papPatch);
4820 rc = MMHyperFree(pVM, pPatchPage->papPatch);
[1]4821 AssertRC(rc);
4822 rc = MMHyperFree(pVM, pPatchPage);
4823 AssertRC(rc);
4824 pVM->patm.s.cPageRecords--;
4825 }
4826 return VINF_SUCCESS;
4827}
4828
4829/**
4830 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4831 *
4832 * @returns VBox status code.
[58122]4833 * @param pVM The cross context VM structure.
[1]4834 * @param pPatch Patch record
4835 */
4836int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4837{
[9220]4838 int rc;
[9228]4839 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
[1]4840
4841 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
[9228]4842 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4843 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
[1]4844
4845 /** @todo optimize better (large gaps between current and next used page) */
4846 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4847 {
4848 /* Get the closest guest instruction (from above) */
[9228]4849 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
[1]4850 if ( pGuestToPatchRec
4851 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4852 )
4853 {
4854 /* Code in page really patched -> add record */
4855 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4856 AssertRC(rc);
4857 }
4858 }
4859 pPatch->flags |= PATMFL_CODE_MONITORED;
4860 return VINF_SUCCESS;
4861}
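
/*
 * Illustrative sketch, not part of the build: the page range considered by
 * patmInsertPatchPages/patmRemovePatchPages is derived from the lowest and
 * highest patched guest instruction, rounded down to page boundaries. The
 * helper below is hypothetical and merely shows that calculation.
 */
#if 0
static uint32_t patmExampleCountCandidatePages(PPATCHINFO pPatch)
{
    RTRCUINTPTR uPageFirst = (RTRCUINTPTR)pPatch->pInstrGCLowest  & PAGE_BASE_GC_MASK;
    RTRCUINTPTR uPageLast  = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
    return (uint32_t)((uPageLast - uPageFirst) / PAGE_SIZE) + 1;
}
#endif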
4862
4863/**
4864 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4865 *
4866 * @returns VBox status code.
[58122]4867 * @param pVM The cross context VM structure.
[1]4868 * @param pPatch Patch record
4869 */
[44362]4870static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
[1]4871{
4872 int rc;
[9228]4873 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
[1]4874
 4875     /* Remove the pages that contain patched instructions from the lookup tree used for detecting self-modifying code. */
[9228]4876 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4877 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
[1]4878
4879 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4880 {
4881 /* Get the closest guest instruction (from above) */
[9228]4882 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
[1]4883 if ( pGuestToPatchRec
[102]4884 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
[1]4885 )
4886 {
4887 /* Code in page really patched -> remove record */
4888 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4889 AssertRC(rc);
4890 }
4891 }
4892 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4893 return VINF_SUCCESS;
4894}
4895
4896/**
4897 * Notifies PATM about a (potential) write to code that has been patched.
4898 *
4899 * @returns VBox status code.
[58122]4900 * @param pVM The cross context VM structure.
[1]4901 * @param GCPtr GC pointer to write address
4902 * @param cbWrite Nr of bytes to write
4903 *
4904 */
[44362]4905VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
[1]4906{
[9228]4907 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
[1]4908
[13834]4909 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
[1]4910
4911 Assert(VM_IS_EMT(pVM));
[70948]4912 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PATM_HM_IPE);
[1]4913
4914 /* Quick boundary check */
4915 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4916 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4917 )
4918 return VINF_SUCCESS;
4919
4920 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4921
[9228]4922 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4923 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
[1]4924
4925 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4926 {
4927loop_start:
[9228]4928 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
[1]4929 if (pPatchPage)
4930 {
4931 uint32_t i;
4932 bool fValidPatchWrite = false;
4933
[5610]4934 /* Quick check to see if the write is in the patched part of the page */
[9228]4935 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
[5610]4936 || pPatchPage->pHighestAddrGC < GCPtr)
4937 {
4938 break;
4939 }
4940
[1]4941 for (i=0;i<pPatchPage->cCount;i++)
4942 {
[41897]4943 if (pPatchPage->papPatch[i])
[1]4944 {
[41897]4945 PPATCHINFO pPatch = pPatchPage->papPatch[i];
[9228]4946 RTRCPTR pPatchInstrGC;
[1]4947 //unused: bool fForceBreak = false;
4948
[41897]4949 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
[1]4950 /** @todo inefficient and includes redundant checks for multiple pages. */
4951 for (uint32_t j=0; j<cbWrite; j++)
4952 {
[9228]4953 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
[1]4954
4955 if ( pPatch->cbPatchJump
4956 && pGuestPtrGC >= pPatch->pPrivInstrGC
4957 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4958 {
4959 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4960 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4961 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
[12855]4962 if (rc == VINF_SUCCESS)
[31437]4963 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
[12855]4964 goto loop_start;
[1]4965
[12855]4966 continue;
[1]4967 }
4968
[5610]4969 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
[5612]4970 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4971 if (!pPatchInstrGC)
4972 {
[9228]4973 RTRCPTR pClosestInstrGC;
[5612]4974 uint32_t size;
4975
4976 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4977 if (pPatchInstrGC)
4978 {
4979 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4980 Assert(pClosestInstrGC <= pGuestPtrGC);
4981 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4982 /* Check if this is not a write into a gap between two patches */
4983 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4984 pPatchInstrGC = 0;
4985 }
4986 }
[1]4987 if (pPatchInstrGC)
4988 {
[93]4989 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
[1]4990
4991 fValidPatchWrite = true;
4992
[93]4993 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
[1]4994 Assert(pPatchToGuestRec);
4995 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4996 {
[13834]4997 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
[1]4998
4999 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
5000 {
[13834]5001 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
[1]5002
[44362]5003 patmR3MarkDirtyPatch(pVM, pPatch);
[1]5004
[31437]5005 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
[1]5006 goto loop_start;
5007 }
5008 else
5009 {
5010 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
5011 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
5012
5013 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
5014 pPatchToGuestRec->fDirty = true;
5015
5016 *pInstrHC = 0xCC;
5017
5018 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
5019 }
5020 }
5021 /* else already marked dirty */
5022 }
5023 }
5024 }
5025 } /* for each patch */
5026
5027 if (fValidPatchWrite == false)
5028 {
5029 /* Write to a part of the page that either:
5030 * - doesn't contain any code (shared code/data); rather unlikely
5031 * - old code page that's no longer in active use.
5032 */
5033invalid_write_loop_start:
[9228]5034 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
[1]5035
5036 if (pPatchPage)
5037 {
5038 for (i=0;i<pPatchPage->cCount;i++)
5039 {
[41897]5040 PPATCHINFO pPatch = pPatchPage->papPatch[i];
[1]5041
[367]5042 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
[1]5043 {
[31437]5044 /* Note: possibly dangerous assumption that all future writes will be harmless. */
[515]5045 if (pPatch->flags & PATMFL_IDTHANDLER)
5046 {
[13834]5047 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
[1]5048
[515]5049 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5050 int rc = patmRemovePatchPages(pVM, pPatch);
5051 AssertRC(rc);
5052 }
5053 else
5054 {
[13834]5055 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
[44362]5056 patmR3MarkDirtyPatch(pVM, pPatch);
[515]5057 }
[31437]5058 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
[1]5059 goto invalid_write_loop_start;
5060 }
[515]5061 } /* for */
[1]5062 }
5063 }
5064 }
5065 }
5066 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5067 return VINF_SUCCESS;
5068
5069}
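
/*
 * Illustrative sketch, not part of the build: a guest write path could simply
 * forward the write to PATMR3PatchWrite(); the function does its own quick
 * range check against the lowest/highest patched instruction, so calling it
 * unconditionally is cheap. The helper below is hypothetical.
 */
#if 0
static void patmExampleOnGuestCodeWrite(PVM pVM, RTRCPTR GCPtrWrite, uint32_t cbWrite)
{
    int rc = PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
    AssertRC(rc);
}
#endif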
5070
5071/**
5072 * Disable all patches in a flushed page
5073 *
5074 * @returns VBox status code
[58122]5075 * @param pVM The cross context VM structure.
[1]5076 * @param addr GC address of the page to flush
[45620]5077 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5078 * having to double check if the physical address has changed
[1]5079 */
[44362]5080VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
[1]5081{
[70948]5082 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PATM_HM_IPE);
[45620]5083
[1]5084 addr &= PAGE_BASE_GC_MASK;
5085
[9228]5086 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
[1]5087 if (pPatchPage)
5088 {
5089 int i;
5090
 5091         /* From top to bottom as the array is modified by patmR3MarkDirtyPatch. */
5092 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5093 {
[41897]5094 if (pPatchPage->papPatch[i])
[1]5095 {
[41897]5096 PPATCHINFO pPatch = pPatchPage->papPatch[i];
[1]5097
[13834]5098 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
[44362]5099 patmR3MarkDirtyPatch(pVM, pPatch);
[1]5100 }
5101 }
5102 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5103 }
5104 return VINF_SUCCESS;
5105}
5106
5107/**
5108 * Checks if the instructions at the specified address has been patched already.
5109 *
5110 * @returns boolean, patched or not
[58122]5111 * @param pVM The cross context VM structure.
[1]5112 * @param pInstrGC Guest context pointer to instruction
5113 */
[44362]5114VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
[1]5115{
[70948]5116 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
[1]5117 PPATMPATCHREC pPatchRec;
[9228]5118 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
[1]5119 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5120 return true;
5121 return false;
5122}
5123
5124/**
 5125 * Query the opcode of the original code that was overwritten by the 5-byte patch jump.
5126 *
5127 * @returns VBox status code.
[58122]5128 * @param pVM The cross context VM structure.
[1]5129 * @param pInstrGC GC address of instr
5130 * @param pByte opcode byte pointer (OUT)
5131 *
5132 */
[12989]5133VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
[1]5134{
5135 PPATMPATCHREC pPatchRec;
5136
5137 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5138
5139 /* Shortcut. */
[45620]5140 if (!PATMIsEnabled(pVM))
[1]5141 return VERR_PATCH_NOT_FOUND;
[70948]5142 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
[45620]5143 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5144 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5145 return VERR_PATCH_NOT_FOUND;
[1]5146
[9228]5147 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
[1]5148 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
[23]5149 if ( pPatchRec
5150 && pPatchRec->patch.uState == PATCH_ENABLED
5151 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
[1]5152 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5153 {
[9228]5154 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
[1]5155 *pByte = pPatchRec->patch.aPrivInstr[offset];
5156
5157 if (pPatchRec->patch.cbPatchJump == 1)
5158 {
[13834]5159 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
[1]5160 }
5161 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5162 return VINF_SUCCESS;
5163 }
5164 return VERR_PATCH_NOT_FOUND;
5165}
5166
5167/**
[41768]5168 * Read instruction bytes of the original code that was overwritten by the
 5169 * 5-byte patch jump.
5170 *
5171 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
[58122]5172 * @param pVM The cross context VM structure.
[41768]5173 * @param GCPtrInstr GC address of instr
5174 * @param pbDst The output buffer.
5175 * @param cbToRead The maximum number bytes to read.
 5176 * @param pcbRead Where to return the actual number of bytes read.
5177 */
[44362]5178VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
[41768]5179{
5180 /* Shortcut. */
[45620]5181 if (!PATMIsEnabled(pVM))
5182 return VERR_PATCH_NOT_FOUND;
[70948]5183 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
[45620]5184 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
[41768]5185 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5186 return VERR_PATCH_NOT_FOUND;
5187
5188 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5189
5190 /*
5191 * If the patch is enabled and the pointer lies within 5 bytes of this
5192 * priv instr ptr, then we've got a hit!
5193 */
5194 RTGCPTR32 off;
5195 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5196 GCPtrInstr, false /*fAbove*/);
5197 if ( pPatchRec
5198 && pPatchRec->patch.uState == PATCH_ENABLED
5199 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5200 {
5201 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5202 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5203 if (cbToRead > cbMax)
5204 cbToRead = cbMax;
5205 switch (cbToRead)
5206 {
[69046]5207 case 5: pbDst[4] = pbSrc[4]; RT_FALL_THRU();
5208 case 4: pbDst[3] = pbSrc[3]; RT_FALL_THRU();
5209 case 3: pbDst[2] = pbSrc[2]; RT_FALL_THRU();
5210 case 2: pbDst[1] = pbSrc[1]; RT_FALL_THRU();
[41768]5211 case 1: pbDst[0] = pbSrc[0];
5212 break;
5213 default:
5214 memcpy(pbDst, pbSrc, cbToRead);
5215 }
5216 *pcbRead = cbToRead;
5217
5218 if (pPatchRec->patch.cbPatchJump == 1)
5219 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5220 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5221 return VINF_SUCCESS;
5222 }
5223
5224 return VERR_PATCH_NOT_FOUND;
5225}
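
/*
 * Illustrative sketch, not part of the build: a disassembler read callback
 * could first ask PATMR3ReadOrgInstr() for the original bytes hidden by a
 * patch jump and fall back to reading guest memory for anything not covered.
 * The helper below is hypothetical.
 */
#if 0
static int patmExampleReadInstrBytes(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead)
{
    size_t cbRead = 0;
    int rc = PATMR3ReadOrgInstr(pVM, GCPtrInstr, pbDst, cbToRead, &cbRead);
    if (rc == VERR_PATCH_NOT_FOUND || (RT_SUCCESS(rc) && cbRead < cbToRead))
    {
        /* Not (fully) covered by a patch jump; read the remainder from guest memory. */
        rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pbDst + cbRead, GCPtrInstr + cbRead, cbToRead - cbRead);
    }
    return rc;
}
#endif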
5226
5227/**
[1]5228 * Disable patch for privileged instruction at specified location
5229 *
5230 * @returns VBox status code.
[58122]5231 * @param pVM The cross context VM structure.
[58126]5232 * @param pInstrGC Guest context pointer to the privileged instruction
[1]5233 *
5234 * @note returns failure if patching is not allowed or possible
5235 *
5236 */
[44362]5237VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
[1]5238{
5239 PPATMPATCHREC pPatchRec;
5240 PPATCHINFO pPatch;
5241
[13834]5242 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
[70948]5243 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PATM_HM_IPE);
[9228]5244 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
[1]5245 if (pPatchRec)
5246 {
5247 int rc = VINF_SUCCESS;
5248
5249 pPatch = &pPatchRec->patch;
5250
5251 /* Already disabled? */
5252 if (pPatch->uState == PATCH_DISABLED)
5253 return VINF_SUCCESS;
5254
5255 /* Clear the IDT entries for the patch we're disabling. */
[31437]5256 /* Note: very important as we clear IF in the patch itself */
[1]5257 /** @todo this needs to be changed */
5258 if (pPatch->flags & PATMFL_IDTHANDLER)
5259 {
5260 uint32_t iGate;
5261
5262 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5263 if (iGate != (uint32_t)~0)
[544]5264 {
[1]5265 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
[12532]5266 if (++cIDTHandlersDisabled < 256)
[13834]5267 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
[544]5268 }
[1]5269 }
5270
 5271         /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, duplicated function, trampoline or IDT patches) */
5272 if ( pPatch->pPatchBlockOffset
5273 && pPatch->uState == PATCH_ENABLED)
5274 {
[13834]5275 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
[1]5276 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5277 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5278 }
5279
5280 /* IDT or function patches haven't changed any guest code. */
5281 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5282 {
5283 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5284 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5285
5286 if (pPatch->uState != PATCH_REFUSED)
5287 {
[30572]5288 uint8_t temp[16];
[1]5289
[30572]5290 Assert(pPatch->cbPatchJump < sizeof(temp));
5291
5292 /* Let's first check if the guest code is still the same. */
5293 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5294 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
[1]5295 if (rc == VINF_SUCCESS)
5296 {
[30572]5297 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
[1]5298
[30572]5299 if ( temp[0] != 0xE9 /* jmp opcode */
5300 || *(RTRCINTPTR *)(&temp[1]) != displ
5301 )
[1]5302 {
[30572]5303                     Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5304 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5305 /* Remove it completely */
5306 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5307 rc = PATMR3RemovePatch(pVM, pInstrGC);
5308 AssertRC(rc);
5309 return VWRN_PATCH_REMOVED;
[1]5310 }
5311 patmRemoveJumpToPatch(pVM, pPatch);
5312 }
5313 else
5314 {
5315 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5316 pPatch->uState = PATCH_DISABLE_PENDING;
5317 }
5318 }
5319 else
5320 {
5321 AssertMsgFailed(("Patch was refused!\n"));
5322 return VERR_PATCH_ALREADY_DISABLED;
5323 }
5324 }
5325 else
5326 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5327 {
5328 uint8_t temp[16];
5329
5330 Assert(pPatch->cbPatchJump < sizeof(temp));
5331
5332 /* Let's first check if the guest code is still the same. */
[18927]5333 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
[1]5334 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5335 if (rc == VINF_SUCCESS)
5336 {
5337 if (temp[0] != 0xCC)
5338 {
 5339                 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5340 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5341 /* Remove it completely */
5342 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5343 rc = PATMR3RemovePatch(pVM, pInstrGC);
5344 AssertRC(rc);
5345 return VWRN_PATCH_REMOVED;
5346 }
5347 patmDeactivateInt3Patch(pVM, pPatch);
5348 }
5349 }
5350
5351 if (rc == VINF_SUCCESS)
5352 {
5353 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5354 if (pPatch->uState == PATCH_DISABLE_PENDING)
5355 {
5356 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5357 pPatch->uState = PATCH_UNUSABLE;
5358 }
5359 else
5360 if (pPatch->uState != PATCH_DIRTY)
5361 {
5362 pPatch->uOldState = pPatch->uState;
5363 pPatch->uState = PATCH_DISABLED;
5364 }
5365 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5366 }
5367
[13834]5368 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
[1]5369 return VINF_SUCCESS;
5370 }
5371 Log(("Patch not found!\n"));
5372 return VERR_PATCH_NOT_FOUND;
5373}
5374
5375/**
5376 * Permanently disable patch for privileged instruction at specified location
5377 *
5378 * @returns VBox status code.
[58122]5379 * @param pVM The cross context VM structure.
[58126]5380 * @param pInstrGC Guest context instruction pointer
[1]5381 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5382 * @param pConflictPatch Conflicting patch
5383 *
5384 */
[9228]5385static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
[1]5386{
[39078]5387 NOREF(pConflictAddr);
[1]5388#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
[26263]5389 PATCHINFO patch;
[1]5390 DISCPUSTATE cpu;
[4776]5391 R3PTRTYPE(uint8_t *) pInstrHC;
[41732]5392 uint32_t cbInstr;
[1]5393 bool disret;
5394 int rc;
5395
[26263]5396 RT_ZERO(patch);
[44362]5397 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
[41732]5398 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
[1]5399 /*
5400 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5401 * with one that jumps right into the conflict patch.
5402 * Otherwise we must disable the conflicting patch to avoid serious problems.
5403 */
5404 if ( disret == true
5405 && (pConflictPatch->flags & PATMFL_CODE32)
[41738]5406 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
[41739]5407 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
[1]5408 {
5409 /* Hint patches must be enabled first. */
5410 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5411 {
[13834]5412 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
[1]5413 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5414 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5415 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5416 /* Enabling might fail if the patched code has changed in the meantime. */
5417 if (rc != VINF_SUCCESS)
5418 return rc;
5419 }
5420
5421 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
[13833]5422 if (RT_SUCCESS(rc))
[1]5423 {
[13834]5424 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
[1]5425 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5426 return VINF_SUCCESS;
5427 }
5428 }
[62651]5429#else
5430 RT_NOREF_PV(pInstrGC);
[1]5431#endif
5432
5433 if (pConflictPatch->opcode == OP_CLI)
5434 {
5435 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
[13834]5436 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
[1]5437 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5438 if (rc == VWRN_PATCH_REMOVED)
5439 return VINF_SUCCESS;
[13833]5440 if (RT_SUCCESS(rc))
[1]5441 {
5442 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5443 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5444 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5445 if (rc == VERR_PATCH_NOT_FOUND)
5446 return VINF_SUCCESS; /* removed already */
5447
5448 AssertRC(rc);
[13833]5449 if (RT_SUCCESS(rc))
[1]5450 {
5451 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5452 return VINF_SUCCESS;
5453 }
5454 }
5455 /* else turned into unusable patch (see below) */
5456 }
5457 else
5458 {
[13834]5459 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
[1]5460 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5461 if (rc == VWRN_PATCH_REMOVED)
5462 return VINF_SUCCESS;
5463 }
5464
5465 /* No need to monitor the code anymore. */
5466 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5467 {
5468 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5469 AssertRC(rc);
5470 }
5471 pConflictPatch->uState = PATCH_UNUSABLE;
5472 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5473 return VERR_PATCH_DISABLED;
5474}
5475
5476/**
5477 * Enable patch for privileged instruction at specified location
5478 *
5479 * @returns VBox status code.
[58122]5480 * @param pVM The cross context VM structure.
[58126]5481 * @param pInstrGC Guest context pointer to the privileged instruction
[1]5482 *
5483 * @note returns failure if patching is not allowed or possible
5484 *
5485 */
[44362]5486VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
[1]5487{
5488 PPATMPATCHREC pPatchRec;
5489 PPATCHINFO pPatch;
5490
[13834]5491 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
[70948]5492 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PATM_HM_IPE);
[9228]5493 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
[1]5494 if (pPatchRec)
5495 {
5496 int rc = VINF_SUCCESS;
5497
5498 pPatch = &pPatchRec->patch;
5499
5500 if (pPatch->uState == PATCH_DISABLED)
5501 {
[23]5502 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
[1]5503 {
5504 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
[30572]5505 uint8_t temp[16];
[1]5506
[30572]5507 Assert(pPatch->cbPatchJump < sizeof(temp));
[1]5508
[30572]5509 /* Let's first check if the guest code is still the same. */
5510 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5511 AssertRC(rc2);
5512 if (rc2 == VINF_SUCCESS)
5513 {
[1]5514 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5515 {
 5516                         Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5517 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5518 /* Remove it completely */
[12855]5519 rc = PATMR3RemovePatch(pVM, pInstrGC);
5520 AssertRC(rc);
[1]5521 return VERR_PATCH_NOT_FOUND;
5522 }
5523
[30572]5524 PATMP2GLOOKUPREC cacheRec;
5525 RT_ZERO(cacheRec);
5526 cacheRec.pPatch = pPatch;
5527
5528 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5529 /* Free leftover lock if any. */
5530 if (cacheRec.Lock.pvMap)
5531 {
5532 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5533 cacheRec.Lock.pvMap = NULL;
5534 }
[25777]5535 AssertRC(rc2);
5536 if (RT_FAILURE(rc2))
5537 return rc2;
[1]5538
5539#ifdef DEBUG
5540 {
[30572]5541 DISCPUSTATE cpu;
5542 char szOutput[256];
[41732]5543 uint32_t cbInstr;
[41671]5544 uint32_t i = 0;
[30572]5545 bool disret;
5546 while(i < pPatch->cbPatchJump)
5547 {
[41671]5548 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
[41732]5549 &cpu, &cbInstr, szOutput, sizeof(szOutput));
[30572]5550 Log(("Renewed patch instr: %s", szOutput));
[41732]5551 i += cbInstr;
[30572]5552 }
[1]5553 }
5554#endif
5555 }
5556 }
5557 else
5558 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5559 {
5560 uint8_t temp[16];
5561
5562 Assert(pPatch->cbPatchJump < sizeof(temp));
5563
5564 /* Let's first check if the guest code is still the same. */
[25777]5565 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5566 AssertRC(rc2);
[1]5567
5568 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5569 {
 5570                 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5571 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
[12855]5572 rc = PATMR3RemovePatch(pVM, pInstrGC);
5573 AssertRC(rc);
[1]5574 return VERR_PATCH_NOT_FOUND;
5575 }
5576
[25777]5577 rc2 = patmActivateInt3Patch(pVM, pPatch);
5578 if (RT_FAILURE(rc2))
5579 return rc2;
[1]5580 }
5581
5582 pPatch->uState = pPatch->uOldState; //restore state
5583
5584 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5585 if (pPatch->pPatchBlockOffset)
5586 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5587
5588 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5589 }
5590 else
[13834]5591 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
[1]5592
5593 return rc;
5594 }
5595 return VERR_PATCH_NOT_FOUND;
5596}
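
/*
 * Illustrative sketch, not part of the build: temporarily taking a patch out
 * of service with PATMR3DisablePatch() and restoring it with
 * PATMR3EnablePatch(), honouring the status codes that indicate the patch
 * vanished in the meantime. The helper below is hypothetical.
 */
#if 0
static int patmExampleTogglePatch(PVM pVM, RTRCPTR pInstrGC)
{
    int rc = PATMR3DisablePatch(pVM, pInstrGC);
    if (rc == VWRN_PATCH_REMOVED)
        return VINF_SUCCESS;    /* the guest code changed; the patch is gone altogether */
    if (RT_FAILURE(rc))
        return rc;

    /* ... inspect or modify the guest code here ... */

    rc = PATMR3EnablePatch(pVM, pInstrGC);
    if (rc == VERR_PATCH_NOT_FOUND)
        rc = VINF_SUCCESS;      /* removed while disabled; nothing left to re-enable */
    return rc;
}
#endif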
5597
5598/**
5599 * Remove patch for privileged instruction at specified location
5600 *
5601 * @returns VBox status code.
[58122]5602 * @param pVM The cross context VM structure.
[1]5603 * @param pPatchRec Patch record
 5604 * @param fForceRemove Force removal even if other code references the patch
5605 */
[44362]5606int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
[1]5607{
5608 PPATCHINFO pPatch;
5609
5610 pPatch = &pPatchRec->patch;
5611
5612 /* Strictly forbidden to remove such patches. There can be dependencies!! */
[12855]5613 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5614 {
[13834]5615 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
[12855]5616 return VERR_ACCESS_DENIED;
5617 }
[13834]5618 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
[1]5619
[31437]5620 /* Note: NEVER EVER REUSE PATCH MEMORY */
5621 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
[1]5622
5623 if (pPatchRec->patch.pPatchBlockOffset)
5624 {
[9228]5625 PAVLOU32NODECORE pNode;
[1]5626
[9228]5627 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
[1]5628 Assert(pNode);
5629 }
5630
5631 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5632 {
5633 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5634 AssertRC(rc);
5635 }
5636
5637#ifdef VBOX_WITH_STATISTICS
5638 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5639 {
[46493]5640 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
[54746]5641 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
[1]5642 }
5643#endif
5644
[31437]5645 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
[93]5646 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
[1]5647 pPatch->nrPatch2GuestRecs = 0;
5648 Assert(pPatch->Patch2GuestAddrTree == 0);
5649
5650 patmEmptyTree(pVM, &pPatch->FixupTree);
5651 pPatch->nrFixups = 0;
5652 Assert(pPatch->FixupTree == 0);
5653
5654 if (pPatchRec->patch.pTempInfo)
5655 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5656
[31437]5657 /* Note: might fail, because it has already been removed (e.g. during reset). */
[9228]5658 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
[1]5659
5660 /* Free the patch record */
5661 MMHyperFree(pVM, pPatchRec);
5662 return VINF_SUCCESS;
5663}
5664
5665/**
[36669]5666 * RTAvlU32DoWithAll() worker.
5667 * Checks whether the current trampoline instruction is the jump to the target patch
5668 * and updates the displacement to jump to the new target.
5669 *
5670 * @returns VBox status code.
5671 * @retval VERR_ALREADY_EXISTS if the jump was found.
5672 * @param pNode The current patch to guest record to check.
5673 * @param pvUser The refresh state.
5674 */
[57389]5675static DECLCALLBACK(int) patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
[36669]5676{
5677 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5678 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5679 PVM pVM = pRefreshPatchState->pVM;
5680
5681 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5682
5683 /*
5684 * Check if the patch instruction starts with a jump.
5685 * ASSUMES that there is no other patch to guest record that starts
5686 * with a jump.
5687 */
5688 if (*pPatchInstr == 0xE9)
5689 {
5690 /* Jump found, update the displacement. */
5691 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5692 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5693 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5694
 5695         LogFlow(("Updating trampoline patch: new patch target %RRv, new displacement %d (old was %d)\n",
5696 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5697
5698 *(uint32_t *)&pPatchInstr[1] = displ;
5699 return VERR_ALREADY_EXISTS; /** @todo better return code */
5700 }
5701
5702 return VINF_SUCCESS;
5703}
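
/*
 * Illustrative sketch, not part of the build: the rel32 displacement of a
 * 5 byte near jump is always "target - (jump address + SIZEOF_NEARJUMP32)",
 * which is what the callback above recomputes when the target patch moves.
 * The helper below is hypothetical.
 */
#if 0
static int32_t patmExampleCalcNearJumpDispl(RTRCPTR pJumpInstrGC, RTRCPTR pTargetGC)
{
    return (int32_t)(pTargetGC - (pJumpInstrGC + SIZEOF_NEARJUMP32));
}
#endif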
5704
5705/**
[2030]5706 * Attempt to refresh the patch by recompiling its entire code block
5707 *
5708 * @returns VBox status code.
[58122]5709 * @param pVM The cross context VM structure.
[2030]5710 * @param pPatchRec Patch record
5711 */
5712int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5713{
5714 PPATCHINFO pPatch;
5715 int rc;
[9228]5716 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
[36669]5717 PTRAMPREC pTrampolinePatchesHead = NULL;
[2030]5718
[13834]5719 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
[2030]5720
5721 pPatch = &pPatchRec->patch;
5722 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
[7618]5723 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
[8276]5724 {
[36669]5725 if (!pPatch->pTrampolinePatchesHead)
5726 {
5727 /*
5728 * It is sometimes possible that there are trampoline patches to this patch
5729 * but they are not recorded (after a saved state load for example).
5730 * Refuse to refresh those patches.
 5731              * In theory this can hurt performance if the patched code is modified by the guest
 5732              * and executed often. However, saved states are usually taken after the guest code
 5733              * was modified and it is not changed again afterwards, so this shouldn't be a
 5734              * big problem.
5735 */
5736 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5737 return VERR_PATCHING_REFUSED;
5738 }
5739 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5740 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
[8276]5741 }
[2030]5742
[46493]5743 /* Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
[2030]5744
5745 rc = PATMR3DisablePatch(pVM, pInstrGC);
5746 AssertRC(rc);
5747
[46493]5748 /* Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
[9228]5749 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
[2030]5750#ifdef VBOX_WITH_STATISTICS
5751 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5752 {
[46493]5753 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
[54746]5754 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
[2030]5755 }
5756#endif
5757
5758 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5759
5760 /* Attempt to install a new patch. */
5761 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
[13833]5762 if (RT_SUCCESS(rc))
[2030]5763 {
[9228]5764 RTRCPTR pPatchTargetGC;
[2030]5765 PPATMPATCHREC pNewPatchRec;
5766
5767 /* Determine target address in new patch */
5768 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5769 Assert(pPatchTargetGC);
5770 if (!pPatchTargetGC)
5771 {
5772 rc = VERR_PATCHING_REFUSED;
5773 goto failure;
5774 }
5775
5776 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5777 pPatch->uCurPatchOffset = 0;
5778
5779 /* insert jump to new patch in old patch block */
5780 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
[13833]5781 if (RT_FAILURE(rc))
[2030]5782 goto failure;
5783
[9228]5784 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
[2030]5785 Assert(pNewPatchRec); /* can't fail */
5786
5787 /* Remove old patch (only do that when everything is finished) */
[44362]5788 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
[2030]5789 AssertRC(rc2);
5790
5791 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
[36801]5792 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
[39034]5793 Assert(fInserted); NOREF(fInserted);
[2030]5794
[39417]5795         Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
[2030]5796 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
[12855]5797
5798 /* Used by another patch, so don't remove it! */
5799 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
[36669]5800
5801 if (pTrampolinePatchesHead)
5802 {
5803 /* Update all trampoline patches to jump to the new patch. */
5804 PTRAMPREC pTrampRec = NULL;
5805 PATMREFRESHPATCH RefreshPatch;
5806
5807 RefreshPatch.pVM = pVM;
5808 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5809
5810 pTrampRec = pTrampolinePatchesHead;
5811
5812 while (pTrampRec)
5813 {
5814 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5815
5816 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5817 /*
5818 * We have to find the right patch2guest record because there might be others
5819 * for statistics.
5820 */
5821 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5822 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5823 Assert(rc == VERR_ALREADY_EXISTS);
5824 rc = VINF_SUCCESS;
5825 pTrampRec = pTrampRec->pNext;
5826 }
5827 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5828 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5829 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5830 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5831 }
[2030]5832 }
5833
5834failure:
[13833]5835 if (RT_FAILURE(rc))
[2030]5836 {
[13834]5837         LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
[2030]5838
5839 /* Remove the new inactive patch */
5840 rc = PATMR3RemovePatch(pVM, pInstrGC);
5841 AssertRC(rc);
5842
5843 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
[36801]5844 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
[39034]5845 Assert(fInserted); NOREF(fInserted);
[2030]5846
5847 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5848 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5849 AssertRC(rc2);
5850
5851 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5852 }
5853 return rc;
5854}
5855
5856/**
[1]5857 * Find patch for privileged instruction at specified location
5858 *
5859 * @returns Patch structure pointer if found; else NULL
[58122]5860 * @param pVM The cross context VM structure.
[58126]5861 * @param pInstrGC Guest context pointer to an instruction that might lie
5862 * within 5 bytes of an existing patch jump
[1]5863 * @param fIncludeHints Include hinted patches or not
5864 */
[44362]5865PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
[1]5866{
[9228]5867 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
[33540]5868 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
[1]5869 if (pPatchRec)
5870 {
5871 if ( pPatchRec->patch.uState == PATCH_ENABLED
5872 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5873 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5874 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5875 {
[13834]5876 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
[1]5877 return &pPatchRec->patch;
5878 }
5879 else
5880 if ( fIncludeHints
5881 && pPatchRec->patch.uState == PATCH_DISABLED
5882 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5883 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5884 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5885 {
[13834]5886 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
[1]5887 return &pPatchRec->patch;
5888 }
5889 }
5890 return NULL;
5891}
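/* Editor's note -- illustrative sketch only, not part of the original source.
 * The range check above means a patch "owns" the bytes overwritten by its
 * 5-byte patch jump. Assuming, for example, a patch with
 * pPrivInstrGC = 0x80001000 and cbPatchJump = 5, the addresses
 * 0x80001001..0x80001004 are reported as hits, while 0x80001000 itself and
 * 0x80001005 are not:
 *
 *     PPATCHINFO pConflict = patmFindActivePatchByEntrypoint(pVM, pInstrGC, false);
 *     if (pConflict)
 *     {
 *         // pInstrGC lies inside an existing patch jump; patching it in place
 *         // would corrupt that jump.
 *     }
 */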
5892
5893/**
5894 * Checks whether the GC address is inside a generated patch jump
5895 *
5896 * @returns true -> yes, false -> no
[58122]5897 * @param pVM The cross context VM structure.
[44362]5898 * @param pAddr Guest context address.
5899 * @param pPatchAddr Guest context patch address (if true).
[1]5900 */
[44362]5901VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
[1]5902{
[9228]5903 RTRCPTR addr;
[1]5904 PPATCHINFO pPatch;
5905
[70948]5906 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
[1]5907 if (!PATMIsEnabled(pVM))
5908 return false;
5909
5910 if (pPatchAddr == NULL)
5911 pPatchAddr = &addr;
5912
5913 *pPatchAddr = 0;
5914
[44362]5915 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
[1]5916 if (pPatch)
5917 *pPatchAddr = pPatch->pPrivInstrGC;
[30572]5918
[1]5919 return *pPatchAddr != 0;
5920}
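/* Editor's note -- illustrative sketch only, not part of the original source.
 * Minimal use of the query above, assuming pVM and a guest address GCPtrFault
 * supplied by the caller; pPatchAddr may be NULL when only a yes/no answer is
 * needed:
 *
 *     RTGCPTR32 GCPtrPatchJump;
 *     if (PATMR3IsInsidePatchJump(pVM, GCPtrFault, &GCPtrPatchJump))
 *         Log(("Fault at %RRv hit the patch jump of the patch at %RRv\n",
 *              GCPtrFault, GCPtrPatchJump));
 */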
5921
5922/**
5923 * Remove patch for privileged instruction at specified location
5924 *
5925 * @returns VBox status code.
[58122]5926 * @param pVM The cross context VM structure.
[58126]5927 * @param pInstrGC Guest context pointer to privileged instruction
[1]5928 *
5929 * @note returns failure if patching is not allowed or possible
5930 *
5931 */
[44362]5932VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
[1]5933{
5934 PPATMPATCHREC pPatchRec;
5935
[70948]5936 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PATM_HM_IPE);
[9228]5937 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
[1]5938 if (pPatchRec)
5939 {
5940 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5941 if (rc == VWRN_PATCH_REMOVED)
5942 return VINF_SUCCESS;
[30572]5943
[44362]5944 return patmR3RemovePatch(pVM, pPatchRec, false);
[1]5945 }
5946 AssertFailed();
5947 return VERR_PATCH_NOT_FOUND;
5948}
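/* Editor's note -- illustrative sketch only, not part of the original source.
 * Removal above is a two-step operation: the patch is disabled first (which
 * restores the guest bytes) and only then torn down, so VWRN_PATCH_REMOVED
 * from the disable step already means there is nothing left to do. Assuming a
 * guest address pInstrGC that was patched earlier:
 *
 *     int rc = PATMR3RemovePatch(pVM, pInstrGC);
 *     if (rc == VERR_PATCH_NOT_FOUND)
 *     {
 *         // no patch record exists for pInstrGC
 *     }
 */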
5949
5950/**
5951 * Mark patch as dirty
5952 *
5953 * @returns VBox status code.
[58122]5954 * @param pVM The cross context VM structure.
[1]5955 * @param pPatch Patch record
5956 *
5957 * @note returns failure if patching is not allowed or possible
5958 *
5959 */
[44362]5960static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
[1]5961{
5962 if (pPatch->pPatchBlockOffset)
5963 {
[13834]5964 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
[1]5965 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5966 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5967 }
5968
5969 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5970 /* Put back the replaced instruction. */
5971 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5972 if (rc == VWRN_PATCH_REMOVED)
5973 return VINF_SUCCESS;
5974
[31437]5975 /* Note: we don't restore patch pages for patches that are not enabled! */
5976 /* Note: be careful when changing this behaviour!! */
[1]5977
5978 /* The patch pages are no longer marked for self-modifying code detection */
5979 if (pPatch->flags & PATMFL_CODE_MONITORED)
5980 {
[25777]5981 rc = patmRemovePatchPages(pVM, pPatch);
[1]5982 AssertRCReturn(rc, rc);
5983 }
5984 pPatch->uState = PATCH_DIRTY;
5985
5986 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5987 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5988
5989 return VINF_SUCCESS;
5990}
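/* Editor's note: the 0xCC written above is the single-byte int3 breakpoint
 * opcode; placing it at the start of the patch block guarantees that any stale
 * jump into the now-dirty patch traps immediately and ends up in
 * PATMR3HandleTrap, which can then recover or refresh the patch. */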
5991
5992/**
5993 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5994 *
5995 * @returns VBox status code.
[58122]5996 * @param pVM The cross context VM structure.
[1]5997 * @param pPatch Patch block structure pointer
5998 * @param pPatchGC GC address in patch block
5999 */
[9228]6000RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
[1]6001{
6002 Assert(pPatch->Patch2GuestAddrTree);
6003 /* Get the closest record from below. */
[93]6004 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
[1]6005 if (pPatchToGuestRec)
6006 return pPatchToGuestRec->pOrgInstrGC;
6007
6008 return 0;
6009}
6010
[45620]6011/**
6012 * Converts Guest code GC ptr to Patch code GC ptr (if found)
[1]6013 *
6014 * @returns corresponding GC pointer in patch block
[58122]6015 * @param pVM The cross context VM structure.
[1]6016 * @param pPatch Current patch block pointer
6017 * @param pInstrGC Guest context pointer to privileged instruction
6018 *
6019 */
[9228]6020RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
[1]6021{
6022 if (pPatch->Guest2PatchAddrTree)
6023 {
[9228]6024 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
[1]6025 if (pGuestToPatchRec)
6026 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6027 }
6028
6029 return 0;
6030}
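/* Editor's note -- illustrative sketch only, not part of the original source.
 * The two per-patch trees (Guest2PatchAddrTree / Patch2GuestAddrTree) make the
 * translation reversible. Assuming pPatch and a guest address pInstrGC that
 * was recorded in both directions while the patch was generated, the reverse
 * lookup normally maps back to the same guest address:
 *
 *     RTRCPTR pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pInstrGC);
 *     if (pPatchGC)
 *         Assert(patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchGC) == pInstrGC);
 */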
6031
[62651]6032#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
[44362]6033/**
6034 * Converts Guest code GC ptr to Patch code GC ptr (if found)
[5610]6035 *
6036 * @returns corresponding GC pointer in patch block
[58122]6037 * @param pVM The cross context VM structure.
[5610]6038 * @param pInstrGC Guest context pointer to privileged instruction
6039 */
[44362]6040static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
[5610]6041{
[44362]6042 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6043 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6044 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6045 return NIL_RTRCPTR;
[5610]6046}
[62651]6047#endif
[5610]6048
[44362]6049/**
6050 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6051 * identical match)
[1]6052 *
6053 * @returns corresponding GC pointer in patch block
[58122]6054 * @param pVM The cross context VM structure.
[44362]6055 * @param pPatch Current patch block pointer
[1]6056 * @param pInstrGC Guest context pointer to privileged instruction
6057 *
6058 */
[44362]6059RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
[1]6060{
[44362]6061 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6062 if (pGuestToPatchRec)
6063 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6064 return NIL_RTRCPTR;
[1]6065}
6066
6067/**
6068 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6069 *
6070 * @returns original GC instruction pointer or 0 if not found
[58122]6071 * @param pVM The cross context VM structure.
[1]6072 * @param pPatchGC GC address in patch block
6073 * @param pEnmState State of the translated address (out)
6074 *
6075 */
[44362]6076VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
[1]6077{
6078 PPATMPATCHREC pPatchRec;
6079 void *pvPatchCoreOffset;
[9228]6080 RTRCPTR pPrivInstrGC;
[1]6081
6082 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
[70948]6083 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
[9228]6084 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
[1]6085 if (pvPatchCoreOffset == 0)
6086 {
[13834]6087 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
[1]6088 return 0;
6089 }
6090 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6091 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6092 if (pEnmState)
6093 {
6094 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6095 || pPatchRec->patch.uState == PATCH_DIRTY
6096 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6097 || pPatchRec->patch.uState == PATCH_UNUSABLE),
[13834]6098 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
[1]6099
6100 if ( !pPrivInstrGC
6101 || pPatchRec->patch.uState == PATCH_UNUSABLE
6102 || pPatchRec->patch.uState == PATCH_REFUSED)
6103 {
6104 pPrivInstrGC = 0;
6105 *pEnmState = PATMTRANS_FAILED;
6106 }
6107 else
6108 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6109 {
6110 *pEnmState = PATMTRANS_INHIBITIRQ;
6111 }
6112 else
6113 if ( pPatchRec->patch.uState == PATCH_ENABLED
6114 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6115 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6116 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6117 {
6118 *pEnmState = PATMTRANS_OVERWRITTEN;
6119 }
6120 else
[44362]6121 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
[1]6122 {
6123 *pEnmState = PATMTRANS_OVERWRITTEN;
6124 }
6125 else
6126 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6127 {
6128 *pEnmState = PATMTRANS_PATCHSTART;
6129 }
6130 else
6131 *pEnmState = PATMTRANS_SAFE;
6132 }
6133 return pPrivInstrGC;
6134}
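/* Editor's note -- illustrative sketch only, not part of the original source.
 * Typical use of the translation above by a caller that found the guest EIP
 * pointing into patch memory (pPatchGC assumed to be such an address):
 *
 *     PATMTRANSSTATE enmState;
 *     RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pPatchGC, &enmState);
 *     switch (enmState)
 *     {
 *         case PATMTRANS_SAFE:         // safe to resume at pOrgInstrGC
 *         case PATMTRANS_PATCHSTART:   // pOrgInstrGC is the patched instruction itself
 *             break;
 *         case PATMTRANS_OVERWRITTEN:  // original code is covered by a patch jump
 *         case PATMTRANS_INHIBITIRQ:   // interrupts must stay inhibited at pOrgInstrGC
 *         case PATMTRANS_FAILED:       // no usable translation
 *         default:
 *             break;
 *     }
 */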
6135
6136/**
6137 * Returns the GC pointer of the patch for the specified GC address
6138 *
6139 * @returns Guest context pointer to the patch code, or NIL_RTRCPTR if no enabled or dirty patch covers the address.
[58122]6140 * @param pVM The cross context VM structure.
[1]6141 * @param pAddrGC Guest context address
6142 */
[44362]6143VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
[1]6144{
6145 PPATMPATCHREC pPatchRec;
6146
[70948]6147 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
[45620]6148
[30572]6149 /* Find the patch record. */
[9228]6150 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
[347]6151 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6152 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
[1]6153 return PATCHCODE_PTR_GC(&pPatchRec->patch);
[44362]6154 return NIL_RTRCPTR;
[1]6155}
6156
6157/**
6158 * Attempt to recover dirty instructions
6159 *
6160 * @returns VBox status code.
[58122]6161 * @param pVM The cross context VM structure.
[41836]6162 * @param pCtx Pointer to the guest CPU context.
6163 * @param pPatch Patch record.
6164 * @param pPatchToGuestRec Patch to guest address record.
6165 * @param pEip GC pointer of trapping instruction.
[1]6166 */
[9228]6167static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
[1]6168{
6169 DISCPUSTATE CpuOld, CpuNew;
6170 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6171 int rc;
[9228]6172 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
[1]6173 uint32_t cbDirty;
6174 PRECPATCHTOGUEST pRec;
[31437]6175 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
[18927]6176 PVMCPU pVCpu = VMMGetCpu0(pVM);
[31437]6177 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
[1]6178
6179 pRec = pPatchToGuestRec;
[31437]6180 pCurInstrGC = pOrgInstrGC;
[1]6181 pCurPatchInstrGC = pEip;
6182 cbDirty = 0;
6183 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6184
6185 /* Find all adjacent dirty instructions */
6186 while (true)
6187 {
[2030]6188 if (pRec->fJumpTarget)
6189 {
[31437]6190 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
[2030]6191 pRec->fDirty = false;
6192 return VERR_PATCHING_REFUSED;
6193 }
6194
[1]6195 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6196 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6197 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6198
6199 /* Only harmless instructions are acceptable. */
[18927]6200 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
[13833]6201 if ( RT_FAILURE(rc)
[41738]6202 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
[15677]6203 {
6204 if (RT_SUCCESS(rc))
[41732]6205 cbDirty += CpuOld.cbInstr;
[15677]6206 else
6207 if (!cbDirty)
6208 cbDirty = 1;
[1]6209 break;
[15677]6210 }
[1]6211
6212#ifdef DEBUG
6213 char szBuf[256];
[44399]6214 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
[30453]6215 szBuf, sizeof(szBuf), NULL);
[1]6216 Log(("DIRTY: %s\n", szBuf));
6217#endif
[15677]6218 /* Mark as clean; if we fail we'll let it always fault. */
6219 pRec->fDirty = false;
6220
[31437]6221 /* Remove old lookup record. */
[1]6222 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
[31438]6223 pPatchToGuestRec = NULL;
[1]6224
[41732]6225 pCurPatchInstrGC += CpuOld.cbInstr;
6226 cbDirty += CpuOld.cbInstr;
[1]6227
6228 /* Let's see if there's another dirty instruction right after. */
[93]6229 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
[1]6230 if (!pRec || !pRec->fDirty)
6231 break; /* no more dirty instructions */
[12835]6232
6233 /* In case of complex instructions the next guest instruction could be quite far off. */
6234 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
[1]6235 }
6236
[13833]6237 if ( RT_SUCCESS(rc)
[41738]6238 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
[1]6239 )
6240 {
6241 uint32_t cbLeft;
6242
6243 pCurPatchInstrHC = pPatchInstrHC;
6244 pCurPatchInstrGC = pEip;
6245 cbLeft = cbDirty;
6246
[13833]6247 while (cbLeft && RT_SUCCESS(rc))
[1]6248 {
6249 bool fValidInstr;
6250
[18927]6251 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
[1]6252
[41738]6253 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
[1]6254 if ( !fValidInstr
[41738]6255 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
[1]6256 )
6257 {
[9228]6258 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
[1]6259
[31437]6260 if ( pTargetGC >= pOrgInstrGC
6261 && pTargetGC <= pOrgInstrGC + cbDirty
[1]6262 )
6263 {
6264 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6265 fValidInstr = true;
6266 }
6267 }
6268
6269 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6270 if ( rc == VINF_SUCCESS
[41732]6271 && CpuNew.cbInstr <= cbLeft /* must still fit */
[1]6272 && fValidInstr
6273 )
6274 {
6275#ifdef DEBUG
6276 char szBuf[256];
[44399]6277 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
[30453]6278 szBuf, sizeof(szBuf), NULL);
[1]6279 Log(("NEW: %s\n", szBuf));
6280#endif
6281
6282 /* Copy the new instruction. */
[41732]6283 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
[1]6284 AssertRC(rc);
6285
6286 /* Add a new lookup record for the duplicated instruction. */
[44362]6287 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
[1]6288 }
6289 else
6290 {
6291#ifdef DEBUG
6292 char szBuf[256];
[44399]6293 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
[30453]6294 szBuf, sizeof(szBuf), NULL);
[1]6295 Log(("NEW: %s (FAILED)\n", szBuf));
6296#endif
[803]6297 /* Restore the old lookup record for the duplicated instruction. */
[44362]6298 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
[803]6299
[804]6300 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
[1]6301 rc = VERR_PATCHING_REFUSED;
6302 break;
6303 }
[41732]6304 pCurInstrGC += CpuNew.cbInstr;
6305 pCurPatchInstrHC += CpuNew.cbInstr;
6306 pCurPatchInstrGC += CpuNew.cbInstr;
6307 cbLeft -= CpuNew.cbInstr;
[26129]6308
6309 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6310 if (!cbLeft)
6311 {
[26130]6312 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6313 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
[26129]6314 {
[26130]6315 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6316 if (pRec)
6317 {
6318 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6319 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
[26129]6320
[26130]6321 Assert(!pRec->fDirty);
[26129]6322
[26130]6323 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6324 if (cbFiller >= SIZEOF_NEARJUMP32)
6325 {
[26129]6326 pPatchFillHC[0] = 0xE9;
[26130]6327 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
[26129]6328#ifdef DEBUG
6329 char szBuf[256];
[44399]6330 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6331 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
[26129]6332 Log(("FILL: %s\n", szBuf));
6333#endif
[26130]6334 }
6335 else
[26129]6336 {
[26130]6337 for (unsigned i = 0; i < cbFiller; i++)
6338 {
6339 pPatchFillHC[i] = 0x90; /* NOP */
[26129]6340#ifdef DEBUG
[26130]6341 char szBuf[256];
[44399]6342 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
[30453]6343 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
[26130]6344 Log(("FILL: %s\n", szBuf));
[26129]6345#endif
[26130]6346 }
[26129]6347 }
6348 }
6349 }
6350 }
[1]6351 }
6352 }
6353 else
6354 rc = VERR_PATCHING_REFUSED;
6355
[13833]6356 if (RT_SUCCESS(rc))
[1]6357 {
6358 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6359 }
6360 else
6361 {
6362 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
[15677]6363 Assert(cbDirty);
6364
[1]6365 /* Mark the whole instruction stream with breakpoints. */
[15677]6366 if (cbDirty)
6367 memset(pPatchInstrHC, 0xCC, cbDirty);
[2030]6368
6369 if ( pVM->patm.s.fOutOfMemory == false
6370 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6371 {
6372 rc = patmR3RefreshPatch(pVM, pPatch);
[13833]6373 if (RT_FAILURE(rc))
[2030]6374 {
[13834]6375 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
[2030]6376 }
6377 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6378 rc = VERR_PATCHING_REFUSED;
6379 }
[1]6380 }
6381 return rc;
6382}
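/* Editor's note -- worked example for the filler logic above, illustrative
 * only. When the rewritten guest instructions end up shorter than the dirty
 * region, the leftover patch bytes are bridged: with, say, cbFiller = 12 and a
 * 5-byte near jump (SIZEOF_NEARJUMP32), the code emits E9 07 00 00 00
 * (jmp rel32 = 12 - 5), which skips the remaining 7 stale bytes; a gap smaller
 * than a near jump is simply padded with 0x90 (NOP) bytes instead. */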
6383
6384/**
6385 * Handle trap inside patch code
6386 *
6387 * @returns VBox status code.
[58122]6388 * @param pVM The cross context VM structure.
[41836]6389 * @param pCtx Pointer to the guest CPU context.
6390 * @param pEip GC pointer of trapping instruction.
6391 * @param ppNewEip GC pointer to new instruction.
[1]6392 */
[44362]6393VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
[1]6394{
6395 PPATMPATCHREC pPatch = 0;
6396 void *pvPatchCoreOffset;
[9228]6397 RTRCUINTPTR offset;
6398 RTRCPTR pNewEip;
[1]6399 int rc;
6400 PRECPATCHTOGUEST pPatchToGuestRec = 0;
[18927]6401 PVMCPU pVCpu = VMMGetCpu0(pVM);
[1]6402
[70948]6403 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PATM_HM_IPE);
[22890]6404 Assert(pVM->cCpus == 1);
[18927]6405
[1]6406 pNewEip = 0;
6407 *ppNewEip = 0;
6408
6409 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6410
6411 /* Find the patch record. */
[31437]6412 /* Note: there might not be a patch to guest translation record (global function) */
[1]6413 offset = pEip - pVM->patm.s.pPatchMemGC;
[9228]6414 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
[1]6415 if (pvPatchCoreOffset)
6416 {
6417 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6418
[12855]6419 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6420
[1]6421 if (pPatch->patch.uState == PATCH_DIRTY)
6422 {
[13834]6423 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
[12855]6424 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
[1]6425 {
6426 /* Function duplication patches set fPIF to 1 on entry */
6427 pVM->patm.s.pGCStateHC->fPIF = 1;
6428 }
6429 }
6430 else
6431 if (pPatch->patch.uState == PATCH_DISABLED)
6432 {
[13834]6433 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
[12855]6434 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
[1]6435 {
6436 /* Function duplication patches set fPIF to 1 on entry */
6437 pVM->patm.s.pGCStateHC->fPIF = 1;
6438 }
6439 }
6440 else
6441 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6442 {
[9228]6443 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
[1]6444
[13834]6445 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
[1]6446 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
[13834]6447 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6448 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
[1]6449 }
6450
[93]6451 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
[13834]6452 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
[1]6453
6454 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6455 pPatch->patch.cTraps++;
6456 PATM_STAT_FAULT_INC(&pPatch->patch);
6457 }
6458 else
[13834]6459 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
[1]6460
6461 /* Check if we were interrupted in PATM generated instruction code. */
6462 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6463 {
6464 DISCPUSTATE Cpu;
[18927]6465 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
[1]6466 AssertRC(rc);
6467
6468 if ( rc == VINF_SUCCESS
[41737]6469 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6470 || Cpu.pCurInstr->uOpcode == OP_PUSH
6471 || Cpu.pCurInstr->uOpcode == OP_CALL)
[1]6472 )
6473 {
[3073]6474 uint64_t fFlags;
6475
[1]6476 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
[3073]6477
[41737]6478 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
[3073]6479 {
[18988]6480 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
[3073]6481 if ( rc == VINF_SUCCESS
6482 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6483 {
6484 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6485
6486 /* Reset the PATM stack. */
6487 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6488
6489 pVM->patm.s.pGCStateHC->fPIF = 1;
6490
6491 Log(("Faulting push -> go back to the original instruction\n"));
6492
6493 /* continue at the original instruction */
[41727]6494 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
[3073]6495 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6496 return VINF_SUCCESS;
6497 }
6498 }
6499
[1]6500 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
[30326]6501 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
[13834]6502 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
[1]6503 if (rc == VINF_SUCCESS)
6504 {
6505 /* The guest page *must* be present. */
[18988]6506 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
[31064]6507 if ( rc == VINF_SUCCESS
[30572]6508 && (fFlags & X86_PTE_P))
[1]6509 {
6510 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6511 return VINF_PATCH_CONTINUE;
6512 }
6513 }
6514 }
[12835]6515 else
6516 if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip)
6517 {
6518 /* Invalidated patch or first instruction overwritten.
[12989]6519 * We can ignore the fPIF state in this case.
[12835]6520 */
6521 /* Reset the PATM stack. */
6522 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
[1]6523
[12835]6524 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6525
6526 pVM->patm.s.pGCStateHC->fPIF = 1;
[12989]6527
[12835]6528 /* continue at the original instruction */
[41727]6529 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
[12835]6530 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6531 return VINF_SUCCESS;
6532 }
6533
[1]6534 char szBuf[256];
[44399]6535 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
[1]6536
6537 /* Very bad. We crashed in emitted code. Probably stack? */
6538 if (pPatch)
6539 {
[54727]6540 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6541 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n",
6542 pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags,
6543 pPatchToGuestRec->fDirty, szBuf));
[1]6544 }
6545 else
[54727]6546 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
[18927]6547 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
[54728]6548 EMR3FatalError(pVCpu, VERR_PATM_IPE_TRAP_IN_PATCH_CODE);
[1]6549 }
6550
6551 /* From here on, we must have a valid patch to guest translation. */
6552 if (pvPatchCoreOffset == 0)
6553 {
6554 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
[13834]6555 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
[30572]6556 return VERR_PATCH_NOT_FOUND;
[1]6557 }
6558
6559 /* Take care of dirty/changed instructions. */
6560 if (pPatchToGuestRec->fDirty)
6561 {
[93]6562 Assert(pPatchToGuestRec->Core.Key == offset);
[1]6563 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6564
6565 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
[13833]6566 if (RT_SUCCESS(rc))
[1]6567 {
6568 /* Retry the current instruction. */
6569 pNewEip = pEip;
6570 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6571 }
6572 else
[2030]6573 {
6574 /* Reset the PATM stack. */
6575 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6576
[1]6577 rc = VINF_SUCCESS; /* Continue at original instruction. */
[2030]6578 }
[1]6579
[41727]6580 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
[1]6581 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6582 return rc;
6583 }
6584
6585#ifdef VBOX_STRICT
6586 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6587 {
6588 DISCPUSTATE cpu;
6589 bool disret;
[41732]6590 uint32_t cbInstr;
[30572]6591 PATMP2GLOOKUPREC cacheRec;
6592 RT_ZERO(cacheRec);
6593 cacheRec.pPatch = &pPatch->patch;
[1]6594
[44362]6595 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
[41732]6596 &cpu, &cbInstr);
[30572]6597 if (cacheRec.Lock.pvMap)
6598 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6599
[41737]6600 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
[1]6601 {
[9228]6602 RTRCPTR retaddr;
[25777]6603 PCPUMCTX pCtx2;
[1]6604
[25777]6605 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
[1]6606
[25777]6607 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
[1]6608 AssertRC(rc);
6609
[13834]6610 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6611 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
[1]6612 }
6613 }
6614#endif
6615
[885]6616 /* Return the original address, corrected by subtracting the CS base address. */
[41727]6617 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
[1]6618
6619 /* Reset the PATM stack. */
6620 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6621
6622 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6623 {
6624 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
[13834]6625 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
[1]6626#ifdef VBOX_STRICT
6627 DISCPUSTATE cpu;
6628 bool disret;
[41732]6629 uint32_t cbInstr;
[30572]6630 PATMP2GLOOKUPREC cacheRec;
6631 RT_ZERO(cacheRec);
6632 cacheRec.pPatch = &pPatch->patch;
[1]6633
[44362]6634 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
[41732]6635 &cpu, &cbInstr);
[30572]6636 if (cacheRec.Lock.pvMap)
6637 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
[1]6638
[41737]6639 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
[1]6640 {
[44362]6641 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
[41732]6642 &cpu, &cbInstr);
[30572]6643 if (cacheRec.Lock.pvMap)
6644 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
[1]6645
[41737]6646 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
[1]6647 }
6648#endif
[19141]6649 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
[1]6650 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6651 }
6652
[13834]6653 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
[44399]6654 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
[1]6655 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6656 {
6657 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
[13834]6658 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
[1]6659 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6660 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6661 return VERR_PATCH_DISABLED;
6662 }
6663
6664#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6665 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6666 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6667 {
[13834]6668 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
[1]6669 //we are only wasting time, back out the patch
6670 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6671 pTrapRec->pNextPatchInstr = 0;
6672 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6673 return VERR_PATCH_DISABLED;
6674 }
6675#endif
6676
6677 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6678 return VINF_SUCCESS;
6679}
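/* Editor's note -- illustrative sketch only, not part of the original source.
 * Rough shape of how a trap handler is expected to hand a fault inside patch
 * memory to the function above (pVM and pCtx assumed to come from the caller):
 *
 *     if (PATMIsPatchGCAddr(pVM, pCtx->eip))
 *     {
 *         RTGCPTR GCPtrNewEip;
 *         int rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &GCPtrNewEip);
 *         if (rc == VINF_SUCCESS)
 *             pCtx->eip = GCPtrNewEip;   // continue at the original guest instruction
 *         else if (rc == VINF_PATCH_CONTINUE)
 *         {
 *             // retry at the (now repaired) patch instruction
 *         }
 *         // other codes (e.g. VERR_PATCH_DISABLED) fall back to normal trap handling
 *     }
 */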
6680
6681
6682/**
6683 * Handle page-fault in monitored page
6684 *
6685 * @returns VBox status code.
[58122]6686 * @param pVM The cross context VM structure.
[1]6687 */
[44362]6688VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
[1]6689{
[70948]6690 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_PATM_HM_IPE);
[55889]6691 PVMCPU pVCpu = VMMGetCpu0(pVM);
[45620]6692
[9228]6693 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
[1]6694 addr &= PAGE_BASE_GC_MASK;
6695
[55889]6696 int rc = PGMHandlerVirtualDeregister(pVM, pVCpu, addr, false /*fHypervisor*/);
[1]6697 AssertRC(rc); NOREF(rc);
6698
[9228]6699 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
[1]6700 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6701 {
6702 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
[13834]6703 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
[1]6704 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6705 if (rc == VWRN_PATCH_REMOVED)
6706 return VINF_SUCCESS;
6707
6708 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6709
6710 if (addr == pPatchRec->patch.pPrivInstrGC)
6711 addr++;
6712 }
6713
6714 for(;;)
6715 {
[9228]6716 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
[1]6717
6718 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6719 break;
6720
6721 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6722 {
6723 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
[13834]6724 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
[1]6725 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6726 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6727 }
6728 addr = pPatchRec->patch.pPrivInstrGC + 1;
6729 }
6730
6731 pVM->patm.s.pvFaultMonitor = 0;
6732 return VINF_SUCCESS;
6733}
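/* Editor's note: the loop above renews every patch whose privileged
 * instruction lives on the faulting page by disabling and re-enabling it,
 * which re-reads the (possibly rewritten) guest bytes; the virtual page
 * handler that raised the fault is deregistered up front so the renewal
 * itself cannot fault again. */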
6734
6735
6736#ifdef VBOX_WITH_STATISTICS
6737
6738static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6739{
6740 if (pPatch->flags & PATMFL_SYSENTER)
6741 {
6742 return "SYSENT";
6743 }
6744 else
6745 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6746 {
6747 static char szTrap[16];
6748 uint32_t iGate;
6749
6750 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6751 if (iGate < 256)
6752 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6753 else
6754 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6755 return szTrap;
6756 }
6757 else
6758 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6759 return "DUPFUNC";
6760 else
6761 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6762 return "FUNCCALL";
6763 else
6764 if (pPatch->flags & PATMFL_TRAMPOLINE)
6765 return "TRAMP";
6766 else
6767 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6768}
6769
6770static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6771{
[39078]6772 NOREF(pVM);
[1]6773 switch(pPatch->uState)
6774 {
6775 case PATCH_ENABLED:
6776 return "ENA";
6777 case PATCH_DISABLED:
6778 return "DIS";
6779 case PATCH_DIRTY:
6780 return "DIR";
6781 case PATCH_UNUSABLE:
6782 return "UNU";
6783 case PATCH_REFUSED:
6784 return "REF";
6785 case PATCH_DISABLE_PENDING:
6786 return "DIP";
6787 default:
6788 AssertFailed();
6789 return " ";
6790 }
6791}
6792
6793/**
6794 * Resets the sample.
[58122]6795 * @param pVM The cross context VM structure.
[1]6796 * @param pvSample The sample registered using STAMR3RegisterCallback.
6797 */
6798static void patmResetStat(PVM pVM, void *pvSample)
6799{
6800 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6801 Assert(pPatch);
6802
6803 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6804 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6805}
6806
6807/**
6808 * Prints the sample into the buffer.
6809 *
[58122]6810 * @param pVM The cross context VM structure.
[1]6811 * @param pvSample The sample registered using STAMR3RegisterCallback.
6812 * @param pszBuf The buffer to print into.
6813 * @param cchBuf The size of the buffer.
6814 */
6815static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6816{
6817 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6818 Assert(pPatch);
6819
6820 Assert(pPatch->uState != PATCH_REFUSED);
6821 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6822
6823 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6824 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6825 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6826}
6827
6828/**
6829 * Returns the GC address of the corresponding patch statistics counter
6830 *
6831 * @returns Stat address
[58122]6832 * @param pVM The cross context VM structure.
[1]6833 * @param pPatch Patch structure
6834 */
[9228]6835RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
[1]6836{
6837 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
[73097]6838 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_UOFFSETOF(STAMRATIOU32, u32A);
[1]6839}
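/* Editor's note -- worked example, illustrative only. Each patch owns one
 * STAMRATIOU32 slot (two 32-bit counters, u32A and u32B) in the pStatsGC
 * array, so with an assumed sizeof(STAMRATIOU32) of 8 bytes a patch with
 * uPatchIdx = 3 gets its u32A counter at pStatsGC + 3 * 8 + 0 = pStatsGC + 24. */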
6840
6841#endif /* VBOX_WITH_STATISTICS */
[44399]6842#ifdef VBOX_WITH_DEBUGGER
[1]6843
6844/**
[58122]6845 * @callback_method_impl{FNDBGCCMD, The '.patmoff' command.}
[1]6846 */
[44399]6847static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
[1]6848{
6849 /*
6850 * Validate input.
6851 */
[44399]6852 NOREF(cArgs); NOREF(paArgs);
6853 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6854 PVM pVM = pUVM->pVM;
6855 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
[1]6856
[70948]6857 if (!VM_IS_RAW_MODE_ENABLED(pVM))
6858 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM/NEM.\n");
[45620]6859
[9228]6860 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
[44362]6861 PATMR3AllowPatching(pVM->pUVM, false);
[1]6862 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6863}
6864
6865/**
[58122]6866 * @callback_method_impl{FNDBGCCMD, The '.patmon' command.}
[1]6867 */
[44399]6868static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
[1]6869{
6870 /*
6871 * Validate input.
6872 */
[44399]6873 NOREF(cArgs); NOREF(paArgs);
6874 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6875 PVM pVM = pUVM->pVM;
6876 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
[1]6877
[70948]6878 if (!VM_IS_RAW_MODE_ENABLED(pVM))
6879 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM/NEM.\n");
[45620]6880
[44362]6881 PATMR3AllowPatching(pVM->pUVM, true);
[9228]6882 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
[1]6883 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6884}
[44399]6885
6886#endif /* VBOX_WITH_DEBUGGER */
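/* Editor's note -- illustrative only. The two callbacks above back the
 * debugger console commands named in their doc comments: ".patmoff" first
 * disables every existing patch via DisableAllPatches and then forbids new
 * ones with PATMR3AllowPatching(pVM->pUVM, false), while ".patmon" re-allows
 * patching first and then re-enables the existing patches via
 * EnableAllPatches. */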
[58122]6887