Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 61884)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 61885)
@@ -332,4 +332,8 @@
 #endif
 
+if "$(USERNAME)" == "bird" && "$(KBUILD_TARGET)" == "win"
+ VBoxVMM_VMMAll/IEMAll.cpp_CXXFLAGS = /FAcs /Fa$(subst /,\\,$(outbase).cod)
+endif
+
 $(call VBOX_SET_VER_INFO_DLL,VBoxVMM,VirtualBox VMM) # Version info / description.
 
@@ -543,4 +547,8 @@
 
  $(call VBOX_SET_VER_INFO_RC,VMMRC,VirtualBox VMM - raw-mode context parts) # Version info / description.
+
+ if "$(USERNAME)" == "bird" && "$(KBUILD_TARGET)" == "win"
+  VMMRC_VMMAll/IEMAll.cpp_CXXFLAGS = /FAcs /Fa$(subst /,\\,$(outbase).cod)
+ endif
 endif # VBOX_WITH_RAW_MODE && !VBOX_ONLY_EXTPACKS
 
@@ -677,5 +685,10 @@
 
  $(call VBOX_SET_VER_INFO_R0,VMMR0,VirtualBox VMM - ring-0 context parts) # Version info / description.
+
+ if "$(USERNAME)" == "bird" && "$(KBUILD_TARGET)" == "win"
+  VMMR0_VMMAll/IEMAll.cpp_CXXFLAGS = /FAcs /Fa$(subst /,\\,$(outbase).cod)
+ endif
 endif # !VBOX_ONLY_EXTPACKS
+
 
 
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 61884)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 61885)
@@ -82,5 +82,4 @@
 //#define IEM_LOG_MEMORY_WRITES
 #define IEM_IMPLEMENTS_TASKSWITCH
-
 
 /*********************************************************************************************************************************
@@ -121,5 +120,4 @@
 
 
-
 /*********************************************************************************************************************************
 *   Structures and Typedefs                                                                                                      *
@@ -138,7 +136,20 @@
  */
 
+/** @typedef PFNIEMOPRM
+ * Pointer to an opcode decoder function with RM byte.
+ */
+
+/** @def FNIEMOPRM_DEF
+ * Define an opcode decoder function with RM byte.
+ *
+ * We're using macros for this so that adding and removing parameters as well as
+ * tweaking compiler specific attributes becomes easier.  See FNIEMOP_CALL_1
+ *
+ * @param   a_Name      The function name.
+ */
 
 #if defined(__GNUC__) && defined(RT_ARCH_X86)
 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
+typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PIEMCPU pIemCpu, uint8_t bRm);
 # define FNIEMOP_DEF(a_Name) \
     IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
@@ -150,4 +161,5 @@
 #elif defined(_MSC_VER) && defined(RT_ARCH_X86)
 typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
+typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PIEMCPU pIemCpu, uint8_t bRm);
 # define FNIEMOP_DEF(a_Name) \
     IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
@@ -159,4 +171,5 @@
 #elif defined(__GNUC__)
 typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
+typedef VBOXSTRICTRC (* PFNIEMOPRM)(PIEMCPU pIemCpu, uint8_t bRm);
 # define FNIEMOP_DEF(a_Name) \
     IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
@@ -168,4 +181,5 @@
 #else
 typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
+typedef VBOXSTRICTRC (* PFNIEMOPRM)(PIEMCPU pIemCpu, uint8_t bRm);
 # define FNIEMOP_DEF(a_Name) \
     IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
@@ -176,4 +190,5 @@
 
 #endif
+#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
 
 
@@ -195,4 +210,15 @@
 *   Defined Constants And Macros                                                                                                 *
 *********************************************************************************************************************************/
+/** @def IEM_WITH_SETJMP
+ * Enables alternative status code handling using setjmps.
+ *
+ * This adds a bit of expense via the setjmp() call since it saves all the
+ * non-volatile registers.  However, it eliminates return code checks and allows
+ * for more optimal return value passing (return regs instead of stack buffer).
+ */
+#if defined(DOXYGEN_RUNNING)
+# define IEM_WITH_SETJMP
+#endif
+
 /** Temporary hack to disable the double execution.  Will be removed in favor
  * of a dedicated execution mode in EM. */
@@ -1250,4 +1276,5 @@
 }
 
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1292,4 +1319,41 @@
 }
 
+#else  /* IEM_WITH_SETJMP */
+
+/**
+ * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
+ *
+ * @returns The opcode byte.
+ * @param   pIemCpu             The IEM state.
+ * @remarks Longjmps on failure instead of returning a status code.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PIEMCPU pIemCpu)
+{
+    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
+    if (rcStrict == VINF_SUCCESS)
+        return pIemCpu->abOpcode[pIemCpu->offOpcode++];
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+
+
+/**
+ * Fetches the next opcode byte.
+ *
+ * @returns The opcode byte.
+ * @param   pIemCpu             The IEM state.
+ * @remarks Longjmps on failure instead of returning a status code.
+ */
+DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PIEMCPU pIemCpu)
+{
+    unsigned offOpcode = pIemCpu->offOpcode;
+    if (RT_LIKELY((uint8_t)offOpcode < pIemCpu->cbOpcode))
+    {
+        pIemCpu->offOpcode = (uint8_t)offOpcode + 1;
+        return pIemCpu->abOpcode[offOpcode];
+    }
+    return iemOpcodeGetNextU8SlowJmp(pIemCpu);
+}
+
+#endif /* IEM_WITH_SETJMP */
 
 /**
@@ -1299,13 +1363,20 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
     do \
     { \
         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
-        if (rcStrict2 != VINF_SUCCESS) \
+        if (rcStrict2 == VINF_SUCCESS) \
+        { /* likely */ } \
+        else \
             return rcStrict2; \
     } while (0)
-
-
+#else
+# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pIemCpu))
+#endif /* IEM_WITH_SETJMP */
+
+
+#ifndef IEM_WITH_SETJMP
 /**
  * Fetches the next signed byte from the opcode stream.
@@ -1319,4 +1390,5 @@
     return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
 }
+#endif /* !IEM_WITH_SETJMP */
 
 
@@ -1328,5 +1400,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
     do \
     { \
@@ -1335,5 +1408,10 @@
             return rcStrict2; \
     } while (0)
-
+#else /* IEM_WITH_SETJMP */
+# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pIemCpu))
+
+#endif /* IEM_WITH_SETJMP */
+
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1373,4 +1451,5 @@
 }
 
+#endif /* !IEM_WITH_SETJMP */
 
 /**
@@ -1381,5 +1460,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
     do \
     { \
@@ -1388,5 +1468,9 @@
             return rcStrict2; \
     } while (0)
-
+#else
+# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pIemCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1426,4 +1510,5 @@
 }
 
+#endif /* !IEM_WITH_SETJMP */
 
 /**
@@ -1434,4 +1519,5 @@
  * @remark Implicitly references pIemCpu.
  */
+#ifndef IEM_WITH_SETJMP
 #define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
     do \
@@ -1441,5 +1527,9 @@
             return rcStrict2; \
     } while (0)
-
+#else
+# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pIemCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1479,4 +1569,6 @@
 }
 
+#endif /* !IEM_WITH_SETJMP */
+
 
 /**
@@ -1487,5 +1579,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
     do \
     { \
@@ -1494,5 +1587,10 @@
             return rcStrict2; \
     } while (0)
-
+#else
+# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pIemCpu))
+#endif
+
+
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1536,4 +1634,45 @@
 }
 
+#else  /* IEM_WITH_SETJMP */
+
+/**
+ * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
+ *
+ * @returns The opcode word.
+ * @param   pIemCpu             The IEM state.
+ * @remarks Longjmps on failure instead of returning a status code.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PIEMCPU pIemCpu)
+{
+    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        uint8_t offOpcode = pIemCpu->offOpcode;
+        pIemCpu->offOpcode += 2;
+        return RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
+    }
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+
+
+/**
+ * Fetches the next opcode word.
+ *
+ * @returns The opcode word.
+ * @param   pIemCpu             The IEM state.
+ * @remarks Longjmps on failure instead of returning a status code.
+ */
+DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PIEMCPU pIemCpu)
+{
+    uint8_t const offOpcode = pIemCpu->offOpcode;
+    if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
+        return iemOpcodeGetNextU16SlowJmp(pIemCpu);
+
+    pIemCpu->offOpcode = offOpcode + 2;
+    return RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
+}
+
+#endif /* IEM_WITH_SETJMP */
+
 
 /**
@@ -1543,5 +1682,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
     do \
     { \
@@ -1550,5 +1690,9 @@
             return rcStrict2; \
     } while (0)
-
+#else
+# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pIemCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1592,4 +1736,6 @@
 }
 
+#endif /* !IEM_WITH_SETJMP */
+
 
 /**
@@ -1600,5 +1746,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
     do \
     { \
@@ -1607,5 +1754,9 @@
             return rcStrict2; \
     } while (0)
-
+#else
+# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pIemCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1649,4 +1800,5 @@
 }
 
+#endif /* !IEM_WITH_SETJMP */
 
 /**
@@ -1657,5 +1809,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
     do \
     { \
@@ -1664,6 +1817,10 @@
             return rcStrict2; \
     } while (0)
-
-
+#else
+# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64)  (*(a_pu64) = iemOpcodeGetNextU16Jmp(pIemCpu))
+#endif
+
+
+#ifndef IEM_WITH_SETJMP
 /**
  * Fetches the next signed word from the opcode stream.
@@ -1677,4 +1834,5 @@
     return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
 }
+#endif /* !IEM_WITH_SETJMP */
 
 
@@ -1686,5 +1844,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
     do \
     { \
@@ -1693,5 +1852,9 @@
             return rcStrict2; \
     } while (0)
-
+#else
+# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pIemCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1741,4 +1904,51 @@
 }
 
+#else  /* IEM_WITH_SETJMP */
+
+/**
+ * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
+ *
+ * @returns The opcode dword.
+ * @param   pIemCpu             The IEM state.
+ * @remarks Longjmps on failure instead of returning a status code.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PIEMCPU pIemCpu)
+{
+    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        uint8_t offOpcode = pIemCpu->offOpcode;
+        pIemCpu->offOpcode = offOpcode + 4;
+        return RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
+                                   pIemCpu->abOpcode[offOpcode + 1],
+                                   pIemCpu->abOpcode[offOpcode + 2],
+                                   pIemCpu->abOpcode[offOpcode + 3]);
+    }
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+
+
+/**
+ * Fetches the next opcode dword.
+ *
+ * @returns The opcode dword.
+ * @param   pIemCpu             The IEM state.
+ * @remarks Longjmps on failure instead of returning a status code.
+ */
+DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PIEMCPU pIemCpu)
+{
+    uint8_t const offOpcode = pIemCpu->offOpcode;
+    if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
+        return iemOpcodeGetNextU32SlowJmp(pIemCpu);
+
+    pIemCpu->offOpcode = offOpcode + 4;
+    return RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
+                               pIemCpu->abOpcode[offOpcode + 1],
+                               pIemCpu->abOpcode[offOpcode + 2],
+                               pIemCpu->abOpcode[offOpcode + 3]);
+}
+
+#endif /* IEM_WITH_SETJMP */
+
 
 /**
@@ -1748,5 +1958,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
     do \
     { \
@@ -1755,5 +1966,9 @@
             return rcStrict2; \
     } while (0)
-
+#else
+# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pIemCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1803,4 +2018,6 @@
 }
 
+#endif /* !IEM_WITH_SETJMP */
+
 
 /**
@@ -1811,5 +2028,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
     do \
     { \
@@ -1818,6 +2036,10 @@
             return rcStrict2; \
     } while (0)
-
-
+#else
+# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pIemCpu))
+#endif
+
+
+#ifndef IEM_WITH_SETJMP
 /**
  * Fetches the next signed double word from the opcode stream.
@@ -1831,4 +2053,5 @@
     return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
 }
+#endif
 
 /**
@@ -1839,5 +2062,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
     do \
     { \
@@ -1846,5 +2070,9 @@
             return rcStrict2; \
     } while (0)
-
+#else
+# define IEM_OPCODE_GET_NEXT_S32(a_pi32)    (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pIemCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1895,4 +2123,6 @@
 }
 
+#endif /* !IEM_WITH_SETJMP */
+
 
 /**
@@ -1903,5 +2133,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
     do \
     { \
@@ -1910,5 +2141,9 @@
             return rcStrict2; \
     } while (0)
-
+#else
+# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pIemCpu))
+#endif
+
+#ifndef IEM_WITH_SETJMP
 
 /**
@@ -1966,4 +2201,58 @@
 }
 
+#else  /* IEM_WITH_SETJMP */
+
+/**
+ * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
+ *
+ * @returns The opcode qword.
+ * @param   pIemCpu             The IEM state.
+ * @remarks Longjmps on failure instead of returning a status code.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PIEMCPU pIemCpu)
+{
+    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        uint8_t offOpcode = pIemCpu->offOpcode;
+        pIemCpu->offOpcode = offOpcode + 8;
+        return RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
+                                   pIemCpu->abOpcode[offOpcode + 1],
+                                   pIemCpu->abOpcode[offOpcode + 2],
+                                   pIemCpu->abOpcode[offOpcode + 3],
+                                   pIemCpu->abOpcode[offOpcode + 4],
+                                   pIemCpu->abOpcode[offOpcode + 5],
+                                   pIemCpu->abOpcode[offOpcode + 6],
+                                   pIemCpu->abOpcode[offOpcode + 7]);
+    }
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+
+
+/**
+ * Fetches the next opcode qword.
+ *
+ * @returns The opcode qword.
+ * @param   pIemCpu             The IEM state.
+ * @remarks Longjmps on failure instead of returning a status code.
+ */
+DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PIEMCPU pIemCpu)
+{
+    uint8_t const offOpcode = pIemCpu->offOpcode;
+    if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
+        return iemOpcodeGetNextU64SlowJmp(pIemCpu);
+
+    pIemCpu->offOpcode = offOpcode + 8;
+    return RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
+                               pIemCpu->abOpcode[offOpcode + 1],
+                               pIemCpu->abOpcode[offOpcode + 2],
+                               pIemCpu->abOpcode[offOpcode + 3],
+                               pIemCpu->abOpcode[offOpcode + 4],
+                               pIemCpu->abOpcode[offOpcode + 5],
+                               pIemCpu->abOpcode[offOpcode + 6],
+                               pIemCpu->abOpcode[offOpcode + 7]);
+}
+
+#endif /* IEM_WITH_SETJMP */
 
 /**
@@ -1973,5 +2262,6 @@
  * @remark Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
     do \
     { \
@@ -1980,4 +2270,7 @@
             return rcStrict2; \
     } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_U64(a_pu64)    ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pIemCpu) )
+#endif
 
 
@@ -7256,4 +7549,145 @@
 }
 
+#ifdef IEM_WITH_SETJMP
+
+/**
+ * Maps the specified guest memory for the given kind of access, longjmp on
+ * error.
+ *
+ * This may be using bounce buffering of the memory if it's crossing a page
+ * boundary or if there is an access handler installed for any of it.  Because
+ * of lock prefix guarantees, we're in for some extra clutter when this
+ * happens.
+ *
+ * This may raise a \#GP, \#SS, \#PF or \#AC.
+ *
+ * @returns Pointer to the mapped memory.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   cbMem               The number of bytes to map.  This is usually 1,
+ *                              2, 4, 6, 8, 12, 16, 32 or 512.  When used by
+ *                              string operations it can be up to a page.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ *                              Use UINT8_MAX to indicate that no segmentation
+ *                              is required (for IDT, GDT and LDT accesses).
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   fAccess             How the memory is being accessed.  The
+ *                              IEM_ACCESS_TYPE_XXX bit is used to figure out
+ *                              how to map the memory, while the
+ *                              IEM_ACCESS_WHAT_XXX bit is used when raising
+ *                              exceptions.
+ */
+IEM_STATIC void *iemMemMapJmp(PIEMCPU pIemCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
+{
+    /*
+     * Check the input and figure out which mapping entry to use.
+     */
+    Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
+    Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
+    Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
+
+    unsigned iMemMap = pIemCpu->iNextMapping;
+    if (   iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
+        || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
+    {
+        iMemMap = iemMemMapFindFree(pIemCpu);
+        AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
+                            ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
+                             pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
+                             pIemCpu->aMemMappings[2].fAccess),
+                            longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
+    }
+
+    /*
+     * Map the memory, checking that we can actually access it.  If something
+     * slightly complicated happens, fall back on bounce buffering.
+     */
+    VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
+    if (rcStrict == VINF_SUCCESS) { /*likely*/ }
+    else longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+
+    /* Crossing a page boundary? */
+    if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
+    { /* No (likely). */ }
+    else
+    {
+        void *pvMem;
+        rcStrict = iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
+        if (rcStrict == VINF_SUCCESS)
+            return pvMem;
+        longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+    }
+
+    RTGCPHYS GCPhysFirst;
+    rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
+    if (rcStrict == VINF_SUCCESS) { /*likely*/ }
+    else longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+
+    if (fAccess & IEM_ACCESS_TYPE_WRITE)
+        Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
+    if (fAccess & IEM_ACCESS_TYPE_READ)
+        Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
+
+    void *pvMem;
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
+    if (rcStrict == VINF_SUCCESS)
+    { /* likely */ }
+    else
+    {
+        /* Fall back on bounce buffering (reuses the outer pvMem). */
+        rcStrict = iemMemBounceBufferMapPhys(pIemCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
+        if (rcStrict == VINF_SUCCESS)
+            return pvMem;
+        longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+    }
+
+    /*
+     * Fill in the mapping table entry.
+     */
+    pIemCpu->aMemMappings[iMemMap].pv      = pvMem;
+    pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
+    pIemCpu->iNextMapping = iMemMap + 1;
+    pIemCpu->cActiveMappings++;
+
+    iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
+    return pvMem;
+}
+
+
+/**
+ * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pvMem               The mapping.
+ * @param   fAccess             The kind of access.
+ */
+IEM_STATIC void iemMemCommitAndUnmapJmp(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
+{
+    int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
+    AssertStmt(iMemMap >= 0, longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), iMemMap));
+
+    /* If it's bounce buffered, we may need to write back the buffer. */
+    if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
+    {
+        if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
+        {
+            VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap, false /*fPostponeFail*/);
+            if (rcStrict == VINF_SUCCESS)
+                return;
+            longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+        }
+    }
+    /* Otherwise unlock it. */
+    else
+        PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
+
+    /* Free the entry. */
+    pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
+    Assert(pIemCpu->cActiveMappings != 0);
+    pIemCpu->cActiveMappings--;
+}
+
+#endif
 
 #ifndef IN_RING3
@@ -7348,4 +7782,25 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data byte, longjmp on error.
+ *
+ * @returns The byte.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    uint8_t const  bRet   = *pu8Src;
+    iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
+    return bRet;
+}
+#endif /* IEM_WITH_SETJMP */
+
+
 /**
  * Fetches a data word.
@@ -7372,4 +7827,25 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data word, longjmp on error.
+ *
+ * @returns The word
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    uint16_t const u16Ret = *pu16Src;
+    iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
+    return u16Ret;
+}
+#endif
+
+
 /**
  * Fetches a data dword.
@@ -7396,4 +7872,25 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data dword, longjmp on error.
+ *
+ * @returns The dword
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    uint32_t const  u32Ret  = *pu32Src;
+    iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
+    return u32Ret;
+}
+#endif
+
+
 #ifdef SOME_UNUSED_FUNCTION
 /**
@@ -7450,4 +7947,25 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data qword, longjmp on error.
+ *
+ * @returns The qword.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    uint64_t const u64Ret = *pu64Src;
+    iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
+    return u64Ret;
+}
+#endif
+
+
 /**
  * Fetches a data qword, aligned at a 16 byte boundrary (for SSE).
@@ -7478,4 +7996,32 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
+ *
+ * @returns The qword.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
+    if (RT_LIKELY(!(GCPtrMem & 15)))
+    {
+        uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+        uint64_t const u64Ret = *pu64Src;
+        iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
+        return u64Ret;
+    }
+
+    VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pIemCpu);
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
+}
+#endif
+
+
 /**
  * Fetches a data tword.
@@ -7502,4 +8048,24 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data tword, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pr80Dst             Where to return the tword.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pIemCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    *pr80Dst = *pr80Src;
+    iemMemCommitAndUnmapJmp(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
+}
+#endif
+
+
 /**
  * Fetches a data dqword (double qword), generally SSE related.
@@ -7526,4 +8092,24 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pu128Dst            Where to return the qword.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+IEM_STATIC void iemMemFetchDataU128Jmp(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    *pu128Dst = *pu128Src;
+    iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
+}
+#endif
+
+
 /**
  * Fetches a data dqword (double qword) at an aligned address, generally SSE
@@ -7557,4 +8143,36 @@
 }
 
+
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data dqword (double qword) at an aligned address, generally SSE
+ * related, longjmp on error.
+ *
+ * Raises \#GP(0) if not aligned.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pu128Dst            Where to return the dqword.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
+    if (   (GCPtrMem & 15) == 0
+        || (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
+    {
+        uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
+                                                                    IEM_ACCESS_DATA_R);
+        *pu128Dst = *pu128Src;
+        iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
+        return;
+    }
+
+    VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pIemCpu);
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+#endif
 
 
@@ -7656,4 +8274,24 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data byte, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   u8Value             The value to store.
+ */
+IEM_STATIC void iemMemStoreDataU8Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
+{
+    /* The lazy approach for now... */
+    uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pIemCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    *pu8Dst = u8Value;
+    iemMemCommitAndUnmapJmp(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
+}
+#endif
+
+
 /**
  * Stores a data word.
@@ -7680,4 +8318,24 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data word, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   u16Value            The value to store.
+ */
+IEM_STATIC void iemMemStoreDataU16Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
+{
+    /* The lazy approach for now... */
+    uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pIemCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    *pu16Dst = u16Value;
+    iemMemCommitAndUnmapJmp(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
+}
+#endif
+
+
 /**
  * Stores a data dword.
@@ -7704,4 +8362,25 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data dword, longjmp on error.
+ *
+ * @note    Longjmps instead of returning a VBox status code on error.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   u32Value            The value to store.
+ */
+IEM_STATIC void iemMemStoreDataU32Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
+{
+    /* The lazy approach for now... */
+    uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pIemCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    *pu32Dst = u32Value;
+    iemMemCommitAndUnmapJmp(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
+}
+#endif
+
+
 /**
  * Stores a data qword.
@@ -7728,4 +8407,24 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data qword, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   u64Value            The value to store.
+ */
+IEM_STATIC void iemMemStoreDataU64Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
+{
+    /* The lazy approach for now... */
+    uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pIemCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    *pu64Dst = u64Value;
+    iemMemCommitAndUnmapJmp(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
+}
+#endif
+
+
 /**
  * Stores a data dqword.
@@ -7752,4 +8451,24 @@
 
 
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data dqword, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   u128Value           The value to store.
+ */
+IEM_STATIC void iemMemStoreDataU128Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
+{
+    /* The lazy approach for now... */
+    uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pIemCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    *pu128Dst = u128Value;
+    iemMemCommitAndUnmapJmp(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
+}
+#endif
+
+
 /**
  * Stores a data dqword, SSE aligned.
@@ -7778,4 +8497,34 @@
     return rc;
 }
+
+
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data dqword, SSE aligned, longjmp on error.
+ *
+ * @note    Raises \#GP(0) via longjmp if the address is not 16-byte aligned.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   u128Value           The value to store.
+ */
+DECL_NO_INLINE(IEM_STATIC, void)
+iemMemStoreDataU128AlignedSseJmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
+{
+    /* The lazy approach for now... */
+    if (   (GCPtrMem & 15) == 0
+        || (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
+    {
+        uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pIemCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+        *pu128Dst = u128Value;
+        iemMemCommitAndUnmapJmp(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
+        return;
+    }
+
+    VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pIemCpu);
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+#endif
 
 
@@ -8666,4 +9415,5 @@
             return rcStrict2; \
     } while (0)
+
 
 #define IEM_MC_ADVANCE_RIP()                            iemRegUpdateRipAndClearRF(pIemCpu)
@@ -8928,52 +9678,109 @@
             = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
 
-#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
+# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
-#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
+# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
-
-#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+#else
+# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u8Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
+    ((a_u8Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem16)))
+# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
+    ((a_u8Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem32)))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
-#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
-
-#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+#else
+# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u16Dst) = iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+    ((a_u16Dst) = iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
+    ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
-#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
-
-#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+#else
+# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u32Dst) = iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+    ((a_u32Dst) = iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+#endif
+
+#ifdef SOME_UNUSED_FUNCTION
+# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
-
-#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
-#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
-    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
-
-#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
+#else
+# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = iemMemFetchDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+    ((a_u64Dst) = iemMemFetchDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
-
-#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
+#else
+# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
+    iemMemFetchDataR80Jmp(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
-
-
-
-#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+#else
+# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
+    iemMemFetchDataU128Jmp(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
+    iemMemFetchDataU128AlignedSseJmp(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
+#endif
+
+
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint8_t u8Tmp; \
@@ -8981,5 +9788,5 @@
         (a_u16Dst) = u8Tmp; \
     } while (0)
-#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint8_t u8Tmp; \
@@ -8987,5 +9794,5 @@
         (a_u32Dst) = u8Tmp; \
     } while (0)
-#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint8_t u8Tmp; \
@@ -8993,5 +9800,5 @@
         (a_u64Dst) = u8Tmp; \
     } while (0)
-#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint16_t u16Tmp; \
@@ -8999,5 +9806,5 @@
         (a_u32Dst) = u16Tmp; \
     } while (0)
-#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint16_t u16Tmp; \
@@ -9005,5 +9812,5 @@
         (a_u64Dst) = u16Tmp; \
     } while (0)
-#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint32_t u32Tmp; \
@@ -9011,6 +9818,21 @@
         (a_u64Dst) = u32Tmp; \
     } while (0)
-
-#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+#else  /* IEM_WITH_SETJMP */
+# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u16Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u32Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u32Dst) = iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+#endif /* IEM_WITH_SETJMP */
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint8_t u8Tmp; \
@@ -9018,5 +9840,5 @@
         (a_u16Dst) = (int8_t)u8Tmp; \
     } while (0)
-#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint8_t u8Tmp; \
@@ -9024,5 +9846,5 @@
         (a_u32Dst) = (int8_t)u8Tmp; \
     } while (0)
-#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint8_t u8Tmp; \
@@ -9030,5 +9852,5 @@
         (a_u64Dst) = (int8_t)u8Tmp; \
     } while (0)
-#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint16_t u16Tmp; \
@@ -9036,5 +9858,5 @@
         (a_u32Dst) = (int16_t)u16Tmp; \
     } while (0)
-#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint16_t u16Tmp; \
@@ -9042,5 +9864,5 @@
         (a_u64Dst) = (int16_t)u16Tmp; \
     } while (0)
-#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     do { \
         uint32_t u32Tmp; \
@@ -9048,22 +9870,58 @@
         (a_u64Dst) = (int32_t)u32Tmp; \
     } while (0)
-
-#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
+#else  /* IEM_WITH_SETJMP */
+# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+#endif /* IEM_WITH_SETJMP */
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
-#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
+# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
-#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
+# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
-#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
+# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
-
-#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
+#else
+# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
+    iemMemStoreDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
+# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
+    iemMemStoreDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
+# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
+    iemMemStoreDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
+# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
+    iemMemStoreDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
-#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
+# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
-#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
+# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
-#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
+# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
+#else
+# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
+    iemMemStoreDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
+# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
+    iemMemStoreDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
+# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
+    iemMemStoreDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
+# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
+    iemMemStoreDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
+#endif
 
 #define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst,  a_i8C)     *(a_pi8Dst)  = (a_i8C)
@@ -9079,8 +9937,15 @@
     } while (0)
 
-#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
-#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
+# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
+#else
+# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
+    iemMemStoreDataU128Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
+# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
+    iemMemStoreDataU128AlignedSseJmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
+#endif
 
 
@@ -9139,6 +10004,11 @@
 
 /** Calculate efficient address from R/M. */
-#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
     IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
+#else
+# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
+    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pIemCpu, (bRm), (cbImm)))
+#endif
 
 #define IEM_MC_CALL_VOID_AIMPL_0(a_pfn)                   (a_pfn)()
@@ -9777,5 +10647,5 @@
     Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
     PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
-#define SET_SS_DEF() \
+# define SET_SS_DEF() \
     do \
     { \
@@ -10059,4 +10929,305 @@
     return VINF_SUCCESS;
 }
+
+
+#ifdef IEM_WITH_SETJMP
+/**
+ * Calculates the effective address of a ModR/M memory operand.
+ *
+ * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
+ *
+ * May longjmp on internal error.
+ *
+ * @return  The effective address.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   bRm                 The ModRM byte.
+ * @param   cbImm               The size of any immediate following the
+ *                              effective address opcode bytes. Important for
+ *                              RIP relative addressing.
+ */
+IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm)
+{
+    Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
+    PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+# define SET_SS_DEF() \
+    do \
+    { \
+        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
+            pIemCpu->iEffSeg = X86_SREG_SS; \
+    } while (0)
+
+    if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
+    {
+/** @todo Check the effective address size crap! */
+        if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
+        {
+            uint16_t u16EffAddr;
+
+            /* Handle the disp16 form with no registers first. */
+            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
+                IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
+            else
+            {
+                /* Get the displacment. */
+                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
+                {
+                    case 0:  u16EffAddr = 0;                             break;
+                    case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
+                    case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
+                    default: AssertFailedStmt(longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
+                }
+
+                /* Add the base and index registers to the disp. */
+                switch (bRm & X86_MODRM_RM_MASK)
+                {
+                    case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
+                    case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
+                    case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
+                    case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
+                    case 4: u16EffAddr += pCtx->si;            break;
+                    case 5: u16EffAddr += pCtx->di;            break;
+                    case 6: u16EffAddr += pCtx->bp;            SET_SS_DEF(); break;
+                    case 7: u16EffAddr += pCtx->bx;            break;
+                }
+            }
+
+            Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
+            return u16EffAddr;
+        }
+
+        Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
+        uint32_t u32EffAddr;
+
+        /* Handle the disp32 form with no registers first. */
+        if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
+            IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
+        else
+        {
+            /* Get the register (or SIB) value. */
+            switch ((bRm & X86_MODRM_RM_MASK))
+            {
+                case 0: u32EffAddr = pCtx->eax; break;
+                case 1: u32EffAddr = pCtx->ecx; break;
+                case 2: u32EffAddr = pCtx->edx; break;
+                case 3: u32EffAddr = pCtx->ebx; break;
+                case 4: /* SIB */
+                {
+                    uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
+
+                    /* Get the index and scale it. */
+                    switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
+                    {
+                        case 0: u32EffAddr = pCtx->eax; break;
+                        case 1: u32EffAddr = pCtx->ecx; break;
+                        case 2: u32EffAddr = pCtx->edx; break;
+                        case 3: u32EffAddr = pCtx->ebx; break;
+                        case 4: u32EffAddr = 0; /*none */ break;
+                        case 5: u32EffAddr = pCtx->ebp; break;
+                        case 6: u32EffAddr = pCtx->esi; break;
+                        case 7: u32EffAddr = pCtx->edi; break;
+                        IEM_NOT_REACHED_DEFAULT_CASE_RET();
+                    }
+                    u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
+
+                    /* add base */
+                    switch (bSib & X86_SIB_BASE_MASK)
+                    {
+                        case 0: u32EffAddr += pCtx->eax; break;
+                        case 1: u32EffAddr += pCtx->ecx; break;
+                        case 2: u32EffAddr += pCtx->edx; break;
+                        case 3: u32EffAddr += pCtx->ebx; break;
+                        case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
+                        case 5:
+                            if ((bRm & X86_MODRM_MOD_MASK) != 0)
+                            {
+                                u32EffAddr += pCtx->ebp;
+                                SET_SS_DEF();
+                            }
+                            else
+                            {
+                                uint32_t u32Disp;
+                                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
+                                u32EffAddr += u32Disp;
+                            }
+                            break;
+                        case 6: u32EffAddr += pCtx->esi; break;
+                        case 7: u32EffAddr += pCtx->edi; break;
+                        IEM_NOT_REACHED_DEFAULT_CASE_RET();
+                    }
+                    break;
+                }
+                case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
+                case 6: u32EffAddr = pCtx->esi; break;
+                case 7: u32EffAddr = pCtx->edi; break;
+                IEM_NOT_REACHED_DEFAULT_CASE_RET();
+            }
+
+            /* Get and add the displacement. */
+            switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
+            {
+                case 0:
+                    break;
+                case 1:
+                {
+                    int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
+                    u32EffAddr += i8Disp;
+                    break;
+                }
+                case 2:
+                {
+                    uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
+                    u32EffAddr += u32Disp;
+                    break;
+                }
+                default:
+                    AssertFailedStmt(longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
+            }
+        }
+
+        if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
+        {
+            Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
+            return u32EffAddr;
+        }
+        Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
+        Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
+        return u32EffAddr & UINT16_MAX;
+    }
+
+    uint64_t u64EffAddr;
+
+    /* Handle the rip+disp32 form with no registers first. */
+    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
+    {
+        IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
+        u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
+    }
+    else
+    {
+        /* Get the register (or SIB) value. */
+        switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
+        {
+            case  0: u64EffAddr = pCtx->rax; break;
+            case  1: u64EffAddr = pCtx->rcx; break;
+            case  2: u64EffAddr = pCtx->rdx; break;
+            case  3: u64EffAddr = pCtx->rbx; break;
+            case  5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
+            case  6: u64EffAddr = pCtx->rsi; break;
+            case  7: u64EffAddr = pCtx->rdi; break;
+            case  8: u64EffAddr = pCtx->r8;  break;
+            case  9: u64EffAddr = pCtx->r9;  break;
+            case 10: u64EffAddr = pCtx->r10; break;
+            case 11: u64EffAddr = pCtx->r11; break;
+            case 13: u64EffAddr = pCtx->r13; break;
+            case 14: u64EffAddr = pCtx->r14; break;
+            case 15: u64EffAddr = pCtx->r15; break;
+            /* SIB */
+            case 4:
+            case 12:
+            {
+                uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
+
+                /* Get the index and scale it. */
+                switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
+                {
+                    case  0: u64EffAddr = pCtx->rax; break;
+                    case  1: u64EffAddr = pCtx->rcx; break;
+                    case  2: u64EffAddr = pCtx->rdx; break;
+                    case  3: u64EffAddr = pCtx->rbx; break;
+                    case  4: u64EffAddr = 0; /* none */ break;
+                    case  5: u64EffAddr = pCtx->rbp; break;
+                    case  6: u64EffAddr = pCtx->rsi; break;
+                    case  7: u64EffAddr = pCtx->rdi; break;
+                    case  8: u64EffAddr = pCtx->r8;  break;
+                    case  9: u64EffAddr = pCtx->r9;  break;
+                    case 10: u64EffAddr = pCtx->r10; break;
+                    case 11: u64EffAddr = pCtx->r11; break;
+                    case 12: u64EffAddr = pCtx->r12; break;
+                    case 13: u64EffAddr = pCtx->r13; break;
+                    case 14: u64EffAddr = pCtx->r14; break;
+                    case 15: u64EffAddr = pCtx->r15; break;
+                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
+                }
+                u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
+
+                /* add base */
+                switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
+                {
+                    case  0: u64EffAddr += pCtx->rax; break;
+                    case  1: u64EffAddr += pCtx->rcx; break;
+                    case  2: u64EffAddr += pCtx->rdx; break;
+                    case  3: u64EffAddr += pCtx->rbx; break;
+                    case  4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
+                    case  6: u64EffAddr += pCtx->rsi; break;
+                    case  7: u64EffAddr += pCtx->rdi; break;
+                    case  8: u64EffAddr += pCtx->r8;  break;
+                    case  9: u64EffAddr += pCtx->r9;  break;
+                    case 10: u64EffAddr += pCtx->r10; break;
+                    case 11: u64EffAddr += pCtx->r11; break;
+                    case 12: u64EffAddr += pCtx->r12; break;
+                    case 14: u64EffAddr += pCtx->r14; break;
+                    case 15: u64EffAddr += pCtx->r15; break;
+                    /* complicated encodings */
+                    case 5:
+                    case 13:
+                        if ((bRm & X86_MODRM_MOD_MASK) != 0)
+                        {
+                            if (!pIemCpu->uRexB)
+                            {
+                                u64EffAddr += pCtx->rbp;
+                                SET_SS_DEF();
+                            }
+                            else
+                                u64EffAddr += pCtx->r13;
+                        }
+                        else
+                        {
+                            uint32_t u32Disp;
+                            IEM_OPCODE_GET_NEXT_U32(&u32Disp);
+                            u64EffAddr += (int32_t)u32Disp;
+                        }
+                        break;
+                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
+                }
+                break;
+            }
+            IEM_NOT_REACHED_DEFAULT_CASE_RET();
+        }
+
+        /* Get and add the displacement. */
+        switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
+        {
+            case 0:
+                break;
+            case 1:
+            {
+                int8_t i8Disp;
+                IEM_OPCODE_GET_NEXT_S8(&i8Disp);
+                u64EffAddr += i8Disp;
+                break;
+            }
+            case 2:
+            {
+                uint32_t u32Disp;
+                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
+                u64EffAddr += (int32_t)u32Disp;
+                break;
+            }
+            IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
+        }
+
+    }
+
+    if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
+    {
+        Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
+        return u64EffAddr;
+    }
+    Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
+    Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
+    return u64EffAddr & UINT32_MAX;
+}
+#endif /* IEM_WITH_SETJMP */
+
 
 /** @}  */
@@ -11207,6 +12378,19 @@
 DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
 {
+#ifdef IEM_WITH_SETJMP
+    VBOXSTRICTRC rcStrict;
+    jmp_buf      JmpBuf;
+    jmp_buf     *pSavedJmpBuf  = pIemCpu->CTX_SUFF(pJmpBuf);
+    pIemCpu->CTX_SUFF(pJmpBuf) = &JmpBuf;
+    if ((rcStrict = setjmp(JmpBuf)) == 0)
+    {
+        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
+        rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
+    }
+    pIemCpu->CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
+#else
     uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
+#endif
     if (rcStrict == VINF_SUCCESS)
         pIemCpu->cInstructions++;
@@ -11227,9 +12411,19 @@
         if (rcStrict == VINF_SUCCESS)
         {
-# ifdef LOG_ENABLED
+#ifdef LOG_ENABLED
             iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
-# endif
+#endif
+#ifdef IEM_WITH_SETJMP
+            pIemCpu->CTX_SUFF(pJmpBuf) = &JmpBuf;
+            if ((rcStrict = setjmp(JmpBuf)) == 0)
+            {
+                uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
+                rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
+            }
+            pIemCpu->CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
+#else
             IEM_OPCODE_GET_NEXT_U8(&b);
             rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
+#endif
             if (rcStrict == VINF_SUCCESS)
                 pIemCpu->cInstructions++;
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h	(revision 61884)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h	(revision 61885)
@@ -523,4 +523,12 @@
 
 
+/** Invalid with RM byte. */
+FNIEMOPRM_DEF(iemOp_InvalidWithRM)
+{
+    IEMOP_MNEMONIC("InvalidWithRM");
+    return IEMOP_RAISE_INVALID_OPCODE();
+}
+
+
 
 /** @name ..... opcodes.
@@ -538,5 +546,5 @@
 
 /** Opcode 0x0f 0x00 /0. */
-FNIEMOP_DEF_1(iemOp_Grp6_sldt, uint8_t, bRm)
+FNIEMOPRM_DEF(iemOp_Grp6_sldt)
 {
     IEMOP_MNEMONIC("sldt Rv/Mw");
@@ -596,5 +604,5 @@
 
 /** Opcode 0x0f 0x00 /1. */
-FNIEMOP_DEF_1(iemOp_Grp6_str, uint8_t, bRm)
+FNIEMOPRM_DEF(iemOp_Grp6_str)
 {
     IEMOP_MNEMONIC("str Rv/Mw");
@@ -654,5 +662,5 @@
 
 /** Opcode 0x0f 0x00 /2. */
-FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
+FNIEMOPRM_DEF(iemOp_Grp6_lldt)
 {
     IEMOP_MNEMONIC("lldt Ew");
@@ -686,5 +694,5 @@
 
 /** Opcode 0x0f 0x00 /3. */
-FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
+FNIEMOPRM_DEF(iemOp_Grp6_ltr)
 {
     IEMOP_MNEMONIC("ltr Ew");
@@ -750,5 +758,5 @@
 
 /** Opcode 0x0f 0x00 /4. */
-FNIEMOP_DEF_1(iemOp_Grp6_verr, uint8_t, bRm)
+FNIEMOPRM_DEF(iemOp_Grp6_verr)
 {
     IEMOP_MNEMONIC("verr Ew");
@@ -759,5 +767,5 @@
 
 /** Opcode 0x0f 0x00 /5. */
-FNIEMOP_DEF_1(iemOp_Grp6_verw, uint8_t, bRm)
+FNIEMOPRM_DEF(iemOp_Grp6_verw)
 {
     IEMOP_MNEMONIC("verr Ew");
@@ -767,21 +775,24 @@
 
 
+/**
+ * Group 6 jump table.
+ */
+IEM_STATIC const PFNIEMOPRM g_apfnGroup6[8] =
+{
+    iemOp_Grp6_sldt,
+    iemOp_Grp6_str,
+    iemOp_Grp6_lldt,
+    iemOp_Grp6_ltr,
+    iemOp_Grp6_verr,
+    iemOp_Grp6_verw,
+    iemOp_InvalidWithRM,
+    iemOp_InvalidWithRM
+};
+
 /** Opcode 0x0f 0x00. */
 FNIEMOP_DEF(iemOp_Grp6)
 {
     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
-    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
-    {
-        case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
-        case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str,  bRm);
-        case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
-        case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr,  bRm);
-        case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
-        case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
-        case 6: return IEMOP_RAISE_INVALID_OPCODE();
-        case 7: return IEMOP_RAISE_INVALID_OPCODE();
-        IEM_NOT_REACHED_DEFAULT_CASE_RET();
-    }
-
+    return FNIEMOP_CALL_1(g_apfnGroup6[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK], bRm);
 }
 
@@ -7126,5 +7137,5 @@
 
 
-const PFNIEMOP g_apfnTwoByteMap[256] =
+IEM_STATIC const PFNIEMOP g_apfnTwoByteMap[256] =
 {
     /* 0x00 */  iemOp_Grp6,
@@ -14349,5 +14360,5 @@
 
 /** Used by iemOp_EscF1. */
-static const PFNIEMOP g_apfnEscF1_E0toFF[32] =
+IEM_STATIC const PFNIEMOP g_apfnEscF1_E0toFF[32] =
 {
     /* 0xe0 */  iemOp_fchs,
Index: /trunk/src/VBox/VMM/include/IEMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/IEMInternal.h	(revision 61884)
+++ /trunk/src/VBox/VMM/include/IEMInternal.h	(revision 61885)
@@ -23,4 +23,6 @@
 #include <VBox/vmm/stam.h>
 #include <VBox/param.h>
+
+#include <setjmp.h>
 
 
@@ -232,8 +234,14 @@
     /** Pointer to the CPU context - ring-3 context. */
     R3PTRTYPE(PCPUMCTX)     pCtxR3;
+    /** Pointer to the setjmp buffer - ring-3 context. */
+    R3PTRTYPE(jmp_buf *)    pJmpBufR3;
     /** Pointer to the CPU context - ring-0 context. */
     R0PTRTYPE(PCPUMCTX)     pCtxR0;
+    /** Pointer to the setjmp buffer - ring-0 context. */
+    R0PTRTYPE(jmp_buf *)    pJmpBufR0;
     /** Pointer to the CPU context - raw-mode context. */
     RCPTRTYPE(PCPUMCTX)     pCtxRC;
+    /** Pointer to the setjmp buffer - raw-mode context. */
+    RCPTRTYPE(jmp_buf *)    pJmpBufRC;
 
     /** Offset of the VMCPU structure relative to this structure (negative). */
