/* $Id: PDMAllCritSect.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * PDM - Write-Only Critical Section, All Contexts.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/semaphore.h>
#endif
#ifdef IN_RING0
# include <iprt/time.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3       20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0       256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC       256


/** Skips some of the overly paranoid atomic updates.
 * Makes some assumptions about cache coherence, though not brave enough not to
 * always end with an atomic update. */
#define PDMCRITSECT_WITH_LESS_ATOMIC_STUFF

/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter

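/* A note on the RTCRITSECT locking counter used throughout this file:
 * cLockers is -1 while the section is free, becomes 0 when one thread owns it
 * with nobody waiting, and is incremented once for every waiter and for every
 * nesting level beyond the first.  This is why the uncontended enter paths
 * below claim ownership with a compare-and-exchange from -1 to 0, and why a
 * decrement in the leave path that yields a value >= 0 means that a waiter
 * must be woken up. */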

/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PVMCC pVM, PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    RT_NOREF(pVM, pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();

#elif defined(IN_RING0)
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    RTNATIVETHREAD  hNativeSelf = GVMMR0GetRing3ThreadForSelf(pVM);
    Assert(hNativeSelf != NIL_RTNATIVETHREAD);

#else
# error "Invalid context"
#endif
    return hNativeSelf;
}


#ifdef IN_RING0
/**
 * Marks the critical section as corrupted.
 */
DECL_NO_INLINE(static, int) pdmCritSectCorrupted(PPDMCRITSECT pCritSect, const char *pszMsg)
{
    ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_CORRUPTED);
    LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pCritSect));
    return VERR_PDM_CRITSECT_IPE;
}
#endif


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect   The critical section.
 * @param   hNativeSelf The native handle of this thread.
 * @param   pSrcPos     The source position of the lock operation.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(hNativeSelf != NIL_RTNATIVETHREAD);
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
    pCritSect->s.Core.cNestings = 1;
# else
    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
# endif
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# else
    NOREF(pSrcPos);
# endif
    if (pSrcPos)
        Log12Func(("%p: uId=%p ln=%u fn=%s\n", pCritSect, pSrcPos->uId, pSrcPos->uLine, pSrcPos->pszFunction));
    else
        Log12Func(("%p\n", pCritSect));

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_DESTROYED if destroyed.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure if ring-0 and on
 *                      an EMT, otherwise NULL.
 * @param   pCritSect   The critsect.
 * @param   hNativeSelf The native thread handle.
 * @param   pSrcPos     The source position of the lock operation.
 * @param   rcBusy      The status code to return when we're in RC or R0
 */
static int pdmR3R0CritSectEnterContended(PVMCC pVM, PVMCPU pVCpu, PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf,
                                         PCRTLOCKVALSRCPOS pSrcPos, int rcBusy)
{
# ifdef IN_RING0
    /*
     * If we've got queued critical section leave operations and rcBusy isn't
     * VINF_SUCCESS, return to ring-3 immediately to avoid deadlocks.
     */
    if (   !pVCpu
        || !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT)
        || rcBusy == VINF_SUCCESS )
    { /* likely */ }
    else
    {
        /** @todo statistics. */
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
        return rcBusy;
    }
# endif

    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     *
     * This handles VERR_TIMEOUT and VERR_INTERRUPTED.
     */
    STAM_REL_PROFILE_START(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
    PSUPDRVSESSION const    pSession = pVM->pSession;
    SUPSEMEVENT const       hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD const          hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD const          hThreadSelf = RTThreadSelf();
#  endif
# else /* IN_RING0 */
    uint64_t const          tsStart           = RTTimeNanoTS();
    uint64_t const          cNsMaxTotalDef    = RT_NS_5MIN;
    uint64_t                cNsMaxTotal       = cNsMaxTotalDef;
    uint64_t const          cNsMaxRetry       = RT_NS_15SEC;
    uint32_t                cMsMaxOne         = RT_MS_5SEC;
    bool                    fNonInterruptible = false;
# endif
    for (;;)
    {
        /*
         * Do the wait.
         *
         * In ring-3 this gets cluttered by lock validation and thread state
         * maintenance.
         *
         * In ring-0 we have to deal with the possibility that the thread has
         * been signalled and the interruptible wait function returning
         * immediately.  In that case we do normal R0/RC rcBusy handling.
         *
         * We always do a timed wait here, so the event handle is revalidated
         * regularly and we won't end up stuck waiting for a destroyed critsect.
         */
        /** @todo Make SUPSemEventClose wake up all waiters. */
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
#  else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
#  endif
        int const rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# else /* IN_RING0 */
        int const rc = !fNonInterruptible
                     ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
                     : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
        Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hOwner=%p)\n",
                   pCritSect, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pCritSect->s.Core.NativeThreadOwner));
# endif /* IN_RING0 */

        /*
         * Make sure the critical section hasn't been deleted before continuing.
         */
        if (RT_LIKELY(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC))
        { /* likely */ }
        else
        {
            LogRel(("PDMCritSectEnter: Destroyed while waiting; pCritSect=%p rc=%Rrc\n", pCritSect, rc));
            return VERR_SEM_DESTROYED;
        }

        /*
         * Most likely we're here because we got signalled.
         */
        if (rc == VINF_SUCCESS)
        {
            STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        }

        /*
         * Timeout and interrupted waits need careful handling in ring-0
         * because we're cooperating with ring-3 on this critical section
         * and thus need to make absolutely sure we won't get stuck here.
         *
         * The r0 interrupted case means something is pending (termination,
         * signal, APC, debugger, whatever), so we must try our best to
         * return to the caller and to ring-3 so it can be dealt with.
         */
        if (RT_LIKELY(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED))
        {
# ifdef IN_RING0
            uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
            int const      rcTerm     = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
            AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
                      ("rcTerm=%Rrc\n", rcTerm));
            if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
                cNsMaxTotal = RT_NS_1MIN;

            if (rc == VERR_TIMEOUT)
            {
                /* Try to get out of here with a non-VINF_SUCCESS status if
                   the thread is terminating or if the timeout has been exceeded. */
                STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrTimeout);
                if (   rcTerm != VINF_THREAD_IS_TERMINATING
                    && cNsElapsed <= cNsMaxTotal)
                    continue;
            }
            else
            {
                /* For interrupt cases, we must return if we can.  If rcBusy is VINF_SUCCESS,
                   we will try non-interruptible sleep for a while to help resolve the issue
                   w/o guru'ing. */
                STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrInterrupted);
                if (   rcTerm != VINF_THREAD_IS_TERMINATING
                    && rcBusy == VINF_SUCCESS
                    && pVCpu != NULL
                    && cNsElapsed <= cNsMaxTotal)
                {
                    if (!fNonInterruptible)
                    {
                        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectNonInterruptibleWaits);
                        fNonInterruptible = true;
                        cMsMaxOne         = 32;
                        uint64_t cNsLeft  = cNsMaxTotal - cNsElapsed;
                        if (cNsLeft > RT_NS_10SEC)
                            cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
                    }
                    continue;
                }
            }

            /*
             * Let's try to get out of here.  We must very carefully undo the
             * cLockers increment we did using compare-and-exchange so that
             * we don't race the semaphore signalling in PDMCritSectLeave
             * and end up with spurious wakeups and two owners at once.
             */
            uint32_t cNoIntWaits = 0;
            uint32_t cCmpXchgs   = 0;
            int32_t  cLockers    = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
            for (;;)
            {
                if (pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC)
                {
                    if (cLockers > 0 && cCmpXchgs < _64M)
                    {
                        bool fRc = ASMAtomicCmpXchgExS32(&pCritSect->s.Core.cLockers, cLockers - 1, cLockers, &cLockers);
                        if (fRc)
                        {
                            LogFunc(("Aborting wait on %p (rc=%Rrc rcTerm=%Rrc cNsElapsed=%'RU64) -> %Rrc\n", pCritSect,
                                     rc, rcTerm, cNsElapsed, rcBusy != VINF_SUCCESS ? rcBusy : rc));
                            STAM_REL_COUNTER_INC(&pVM->pdm.s.StatAbortedCritSectEnters);
                            return rcBusy != VINF_SUCCESS ? rcBusy : rc;
                        }
                        cCmpXchgs++;
                        if ((cCmpXchgs & 0xffff) == 0)
                            Log11Func(("%p: cLockers=%d cCmpXchgs=%u (hOwner=%p)\n",
                                       pCritSect, cLockers, cCmpXchgs, pCritSect->s.Core.NativeThreadOwner));
                        ASMNopPause();
                        continue;
                    }

                    if (cLockers == 0)
                    {
                        /*
                         * We are racing someone in PDMCritSectLeave.
                         *
                         * For the VERR_TIMEOUT case we'll just retry taking it the normal
                         * way for a while.  For VERR_INTERRUPTED we're in for more fun as
                         * the previous owner might not have signalled the semaphore yet,
                         * so we'll do a short non-interruptible wait instead and then guru.
                         */
                        if (   rc == VERR_TIMEOUT
                            && RTTimeNanoTS() - tsStart <= cNsMaxTotal + cNsMaxRetry)
                            break;

                        if (   rc == VERR_INTERRUPTED
                            && (   cNoIntWaits == 0
                                || RTTimeNanoTS() - (tsStart + cNsElapsed) < RT_NS_100MS))
                        {
                            int const rc2 = SUPSemEventWait(pSession, hEvent, 1 /*ms*/);
                            if (rc2 == VINF_SUCCESS)
                            {
                                STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectEntersWhileAborting);
                                STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
                                return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
                            }
                            cNoIntWaits++;
                            cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
                            continue;
                        }
                    }
                    else
                        LogFunc(("Critical section %p has a broken cLockers count. Aborting.\n", pCritSect));

                    /* Sabotage the critical section and return error to caller. */
                    ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_FAILED_ABORT);
                    LogRel(("PDMCritSectEnter: Failed to abort wait on pCritSect=%p (rc=%Rrc rcTerm=%Rrc)\n",
                            pCritSect, rc, rcTerm));
                    return VERR_PDM_CRITSECT_ABORT_FAILED;
                }
                LogRel(("PDMCritSectEnter: Destroyed while aborting wait; pCritSect=%p/%#x rc=%Rrc rcTerm=%Rrc\n",
                        pCritSect, pCritSect->s.Core.u32Magic, rc, rcTerm));
                return VERR_SEM_DESTROYED;
            }

            /* We get here if we timed out.  Just retry now that it
               appears someone left already. */
            Assert(rc == VERR_TIMEOUT);
            cMsMaxOne = 10 /*ms*/;

# else  /* IN_RING3 */
            RT_NOREF(pVM, pVCpu, rcBusy);
# endif /* IN_RING3 */
        }
        /*
         * Any other return code is fatal.
         */
        else
        {
            AssertMsgFailed(("rc=%Rrc\n", rc));
            return RT_FAILURE_NP(rc) ? rc : -rc;
        }
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 * @param   pSrcPos     The source position of the lock operation.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);
#if defined(VBOX_STRICT) && defined(IN_RING0)
    /* Hope we're not messing with critical sections while in the no-block
       zone, that would complicate things a lot. */
    PVMCPUCC pVCpuAssert = VMMGetCpu(pVM);
    Assert(pVCpuAssert && VMMRZCallRing3IsEnabled(pVCpuAssert));
#endif

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
    { /* We're more likely to end up here with real critsects than a NOP one. */ }
    else
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        Assert(pCritSect->s.Core.cNestings >= 1);
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        pCritSect->s.Core.cNestings += 1;
# else
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
# endif
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    NOREF(rcBusy);
    return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);

#elif defined(IN_RING0)
# if 1 /* new code */
    /*
     * In ring-0 context we have to take the special VT-x/AMD-V HM context into
     * account when waiting on contended locks.
     *
     * While we usually (it can be VINF_SUCCESS) have the option of returning
     * rcBusy and forcing the caller to go back to ring-3 and to re-start the work
     * there, it's almost always more efficient to try to wait for the lock here.
     * The rcBusy will be used if we encounter a VERR_INTERRUPTED situation
     * though.
     */
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        VMMR0EMTBLOCKCTX Ctx;
        int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
        if (rc == VINF_SUCCESS)
        {
            Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));

            rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy);

            VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
        }
        else
            STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
        return rc;
    }

    /* Non-EMT. */
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);

# else  /* old code: */
    /*
     * If preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pVM, VMMGetCpu(pVM), pCritSect, hNativeSelf, pSrcPos, rcBusy);

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVMCPUCC pVCpu = VMMGetCpu(pVM);
        AssertReturn(pVCpu, VERR_PDM_CRITSECT_IPE);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
# endif /* old code */
#else
# error "Unsupported context"
#endif
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
| 589 | * @retval VERR_SEM_DESTROYED if the critical section is delete before or
|
---|
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section thru a ring-3
 *                      call if necessary.
 *
 * @note    Even callers setting @a rcBusy to VINF_SUCCESS must either handle
 *          possible failures in ring-0 or apply
 *          PDM_CRITSECT_RELEASE_ASSERT_RC(),
 *          PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(),
 *          PDM_CRITSECT_RELEASE_ASSERT_RC_DRV() or
 *          PDM_CRITSECT_RELEASE_ASSERT_RC_USB() to the return value of this
 *          function.
 */
VMMDECL(DECL_CHECK_RETURN_NOT_R3(int)) PDMCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
#endif
}
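
/* Illustrative usage sketch (pThis and its CritSect member are hypothetical
 * placeholders; PDMCritSectEnter and PDMCritSectLeave are the APIs defined in
 * this file):
 *
 *      int rcLock = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);
 *      if (rcLock == VINF_SUCCESS)
 *      {
 *          ... do the guarded work ...
 *          PDMCritSectLeave(pVM, &pThis->CritSect);
 *      }
 *      else
 *          ... rcBusy (VERR_SEM_BUSY here) came back in R0/RC, e.g. retry in ring-3 ...
 */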


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
| 623 | * @retval VERR_SEM_DESTROYED if the critical section is delete before or
|
---|
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section thru a ring-3
 *                      call if necessary.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   SRC_POS     The source position where the lock is being
 *                      acquired from.  Optional.
 */
VMMDECL(DECL_CHECK_RETURN_NOT_R3(int))
PDMCritSectEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
#endif
}


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section.
 * @param   pSrcPos     The source position of the lock operation.
 */
static int pdmCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
    { /* We're more likely to end up here with real critsects than a NOP one. */ }
    else
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        Assert(pCritSect->s.Core.cNestings >= 1);
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        pCritSect->s.Core.cNestings += 1;
# else
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
# endif
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section.
 */
VMMDECL(DECL_CHECK_RETURN(int)) PDMCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pVM, pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
#endif
}
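
/* Illustrative usage sketch (pThis and its CritSect member are hypothetical
 * placeholders): non-blocking acquisition for contexts that must not wait:
 *
 *      if (PDMCritSectTryEnter(pVM, &pThis->CritSect) == VINF_SUCCESS)
 *      {
 *          ... do the guarded work ...
 *          PDMCritSectLeave(pVM, &pThis->CritSect);
 *      }
 *      else
 *          ... VERR_SEM_BUSY: defer the work or fall back to PDMCritSectEnter ...
 */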


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   SRC_POS     The source position where the lock is being
 *                      acquired from.  Optional.
 */
VMMDECL(DECL_CHECK_RETURN(int))
PDMCritSectTryEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectTryEnter(pVM, pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PVM pVM, PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @returns Indication whether we really exited the critical section.
 * @retval  VINF_SUCCESS if we really exited.
 * @retval  VINF_SEM_NESTED if we only reduced the nesting count.
 * @retval  VERR_NOT_OWNER if you somehow ignore release assertions.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The PDM critical section to leave.
 *
 * @remarks Can be called from no-ring-3-call context in ring-0 (TM/VirtualSync)
 *          where we'll queue leaving operation for ring-3 processing.
 */
VMMDECL(int) PDMCritSectLeave(PVMCC pVM, PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /*
     * Check for NOP sections before asserting ownership.
     */
    if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
    { /* We're more likely to end up here with real critsects than a NOP one. */ }
    else
        return VINF_SUCCESS;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
    VMM_ASSERT_RELEASE_MSG_RETURN(pVM, pCritSect->s.Core.NativeThreadOwner == hNativeSelf && hNativeSelf != NIL_RTNATIVETHREAD,
                                  ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                   pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                   pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
                                  VERR_NOT_OWNER);

    /*
     * Nested leave.
     */
    int32_t const cNestings = pCritSect->s.Core.cNestings;
    Assert(cNestings >= 1);
    if (cNestings > 1)
    {
#ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        pCritSect->s.Core.cNestings = cNestings - 1;
#else
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
#endif
        int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(cLockers >= 0); RT_NOREF(cLockers);
        Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, cNestings - 1, cLockers));
        return VINF_SEM_NESTED;
    }

    Log12Func(("%p: cNestings=%d cLockers=%d hOwner=%p - leave for real\n",
               pCritSect, cNestings, pCritSect->s.Core.cLockers, pCritSect->s.Core.NativeThreadOwner));

#ifdef IN_RING3
    /*
     * Ring-3: Leave for real.
     */
    SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
    pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;

# if defined(PDMCRITSECT_STRICT)
    if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
# endif
    Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);

# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
    //pCritSect->s.Core.cNestings = 0; /* not really needed */
    pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
# else
    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
# endif
    ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);

    /* Stop profiling and decrement lockers. */
    STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
    ASMCompilerBarrier();
    int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
    if (cLockers < 0)
        AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
    else
    {
        /* Someone is waiting, wake up one of them. */
        Assert(cLockers < _8K);
        Log8(("PDMCritSectLeave: Waking up %p (cLockers=%u)\n", pCritSect, cLockers));
        SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
        int rc = SUPSemEventSignal(pVM->pSession, hEvent);
        AssertRC(rc);
    }

    /* Signal exit event. */
    if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
    { /* likely */ }
    else
    {
        Log8(("PDMCritSectLeave: Signalling %#p (%p)\n", hEventToSignal, pCritSect));
        int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
        AssertRC(rc);
    }

    return VINF_SUCCESS;


#elif defined(IN_RING0)
    /*
     * Ring-0: Try leave for real, depends on host and context.
     */
    SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
    pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
    PVMCPUCC pVCpu           = VMMGetCpu(pVM);
    bool     fQueueOnTrouble = false; /* Set this to true to test queueing. */
    if (   pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
        || VMMRZCallRing3IsEnabled(pVCpu)
        || RTSemEventIsSignalSafe()
        || (   VMMR0ThreadCtxHookIsEnabled(pVCpu)       /* Doesn't matter if Signal() blocks if we have hooks, ... */
            && RTThreadPreemptIsEnabled(NIL_RTTHREAD)   /* ... and preemption is still enabled, */
            && ASMIntAreEnabled())                      /* ... and interrupts haven't yet been disabled.  Special pre-GC HM env. */
        || (fQueueOnTrouble = (   hEventToSignal == NIL_SUPSEMEVENT
                               && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers) == 0)) )
    {
        pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;

# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        //pCritSect->s.Core.cNestings = 0; /* not really needed */
        pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
# else
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Stop profiling and decrement lockers.
         */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();

        bool    fQueueIt = false;
        int32_t cLockers;
        if (!fQueueOnTrouble)
            cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        else
        {
            cLockers = -1;
            if (!ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                fQueueIt = true;
        }
        if (!fQueueIt)
        {
            VMMR0EMTBLOCKCTX    Ctx;
            bool                fLeaveCtx = false;
            if (cLockers < 0)
                AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
            else
            {
                /* Someone is waiting, wake up one of them. */
                Assert(cLockers < _8K);
                SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
                if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
                {
                    int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
                    VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
                    fLeaveCtx = true;
                }
                int rc = SUPSemEventSignal(pVM->pSession, hEvent);
                AssertRC(rc);
            }

            /*
             * Signal exit event.
             */
            if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
            { /* likely */ }
            else
            {
                if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
                {
                    int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
                    VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
                    fLeaveCtx = true;
                }
                Log8(("Signalling %#p\n", hEventToSignal));
                int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
                AssertRC(rc);
            }

            /*
             * Restore HM context if needed.
             */
            if (!fLeaveCtx)
            { /* contention should be unlikely */ }
            else
                VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);

# ifdef DEBUG_bird
            VMMTrashVolatileXMMRegs();
# endif
            return VINF_SUCCESS;
        }

        /*
         * Darn, someone raced in on us.  Restore the state (this works only
         * because the semaphore is effectively controlling ownership).
         */
        bool fRc;
        RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
        ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
        AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
                              pdmCritSectCorrupted(pCritSect, "owner race"));
        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        //pCritSect->s.Core.cNestings = 1;
        Assert(pCritSect->s.Core.cNestings == 1);
# else
        //Assert(pCritSect->s.Core.cNestings == 0);
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
# endif
        Assert(hEventToSignal == NIL_SUPSEMEVENT);
    }


#else  /* IN_RC */
    /*
     * Raw-mode: Try leave it.
     */
# error "This context is not use..."
    if (pCritSect->s.Core.cLockers == 0)
    {
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        //pCritSect->s.Core.cNestings = 0; /* not really needed */
# else
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
            return VINF_SUCCESS;

        /*
         * Darn, someone raced in on us.  Restore the state (this works only
         * because the semaphore is effectively controlling ownership).
         */
        bool fRc;
        RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
        ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
        AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
                              pdmCritSectCorrupted(pCritSect, "owner race"));
        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        //pCritSect->s.Core.cNestings = 1;
        Assert(pCritSect->s.Core.cNestings == 1);
# else
        //Assert(pCritSect->s.Core.cNestings == 0);
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
# endif
    }
#endif /* IN_RC */


#ifndef IN_RING3
    /*
     * Ring-0/raw-mode: Unable to leave.  Queue the leave for ring-3.
     */
    ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
# ifndef IN_RING0
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
# endif
    uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
    LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves), ("%d\n", i), VERR_PDM_CRITSECT_IPE);
    pVCpu->pdm.s.apQueuedCritSectLeaves[i] = pCritSect->s.pSelfR3;
    VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
                                  RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectLeaves[i])
                                  &&   ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectLeaves[i] & HOST_PAGE_OFFSET_MASK)
                                     == ((uintptr_t)pCritSect & HOST_PAGE_OFFSET_MASK),
                                  ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectLeaves[i], pCritSect),
                                  pdmCritSectCorrupted(pCritSect, "Invalid pSelfR3 value"));
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT); /** @todo handle VMCPU_FF_PDM_CRITSECT in ring-0 outside the no-call-ring-3 part. */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);        /* unnecessary paranoia */
    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);

    return VINF_SUCCESS;
#endif /* IN_RING3 */
}


#if defined(IN_RING0) || defined(IN_RING3)
/**
 * Schedule an event semaphore for signalling upon critsect exit.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_TOO_MANY_SEMAPHORES if an event was already scheduled.
 * @returns VERR_NOT_OWNER if we're not the critsect owner (ring-3 only).
 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect       The critical section.
 * @param   hEventToSignal  The support driver event semaphore that should be
 *                          signalled.
 */
VMMDECL(int) PDMHCCritSectScheduleExitEvent(PPDMCRITSECT pCritSect, SUPSEMEVENT hEventToSignal)
{
    AssertPtr(pCritSect);
    Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
    Assert(hEventToSignal != NIL_SUPSEMEVENT);
# ifdef IN_RING3
    if (RT_UNLIKELY(!RTCritSectIsOwner(&pCritSect->s.Core)))
        return VERR_NOT_OWNER;
# endif
    if (RT_LIKELY(   pCritSect->s.hEventToSignal == NIL_RTSEMEVENT
                  || pCritSect->s.hEventToSignal == hEventToSignal))
    {
        pCritSect->s.hEventToSignal = hEventToSignal;
        return VINF_SUCCESS;
    }
    return VERR_TOO_MANY_SEMAPHORES;
}
#endif /* IN_RING0 || IN_RING3 */


/**
 * Checks the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PVMCC pVM, PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    RT_NOREF(pVM);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
    if (   !pVCpu
        || pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PVMCPUCC pVCpu, PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pVCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    Assert(VMCC_GET_CPU(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu) == pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PVMCC pVM, PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pVM, pCritSect)); RT_NOREF(pVM);
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}