VirtualBox

source: vbox/trunk/src/VBox/Additions/WINNT/SharedFolders/driver/file.cpp

Last change on this file was 98103, checked in by vboxsync

Copyright year updates by scm.

/* $Id: file.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
/** @file
 * VirtualBox Windows Guest Shared Folders - File System Driver file routines.
 */

/*
 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "vbsf.h"
#include <iprt/fs.h>
#include <iprt/mem.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** How many pages we should try to transfer in one I/O request (read/write). */
#define VBSF_MAX_IO_PAGES   RT_MIN(_16K / sizeof(RTGCPHYS64) /* => 8MB buffer */, VMMDEV_MAX_HGCM_DATA_SIZE >> PAGE_SHIFT)
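/* For reference: assuming the usual 4 KiB PAGE_SIZE and an 8-byte RTGCPHYS64,
   _16K / sizeof(RTGCPHYS64) yields 2048 page-list entries, i.e. the 8MB buffer
   mentioned above; the VMMDEV_MAX_HGCM_DATA_SIZE >> PAGE_SHIFT term merely caps
   this at the page count corresponding to the maximum HGCM data size. */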



/** @name HACK ALERT! Using the better CcCoherencyFlushAndPurgeCache when
 * available (>= Windows 7) to avoid flushing+purging the cache twice.
 *
 * We change the cache flushing and purging related imports of the write.obj
 * and read.obj files in rdbsslib.lib so that these get redirected here
 * instead of going directly to ntoskrnl.  We will use
 * CcCoherencyFlushAndPurgeCache when present, and on older systems there will
 * be no change.  This does however save us from doing double flushing and
 * purging on newer systems.
 *
 * If we don't use CcCoherencyFlushAndPurgeCache we end up not seeing newly
 * written data in memory mappings, and similarly not seeing data from freshly
 * dirtied (but as yet unflushed) memory mapping pages when reading.  (Both
 * these scenarios are tested by FsPerf --mmap.)
 *
 * See VBoxEditCoffLib and the Makefile.kmk for the rest of the puzzle.
 *
 * @todo investigate whether we could do it the same way as we do on Linux,
 *       where we interrogate the cache and use cached data when memory mappings
 *       are active.  Only troubles are:
 *
 *          1. Don't know how to find out whether we've got memory mappings.
 *
 *          2. Don't know how to detect dirty pages (we should only read
 *             from dirty ones).
 *
 *       To really explore this, it would be best to introduce a caching mode
 *       mount parameter (or something) analogous to what we have on Linux.  In
 *       the relaxed mode, we could get away with more as users could always
 *       disable caching...
 * @{
 */

/** For reads. */
static VOID NTAPI vbsfNtReadCcFlushCache(PSECTION_OBJECT_POINTERS pSectObjPtrs, PLARGE_INTEGER poffFlush, ULONG cbFlush,
                                         PIO_STATUS_BLOCK pIos)
{
    if (g_pfnCcCoherencyFlushAndPurgeCache)
        g_pfnCcCoherencyFlushAndPurgeCache(pSectObjPtrs, poffFlush, cbFlush, pIos, CC_FLUSH_AND_PURGE_NO_PURGE);
    else
        CcFlushCache(pSectObjPtrs, poffFlush, cbFlush, pIos);
}

/**
 * For writes with mmapping/caching section, called before the purging.
 *
 * This does both flushing and purging when CcCoherencyFlushAndPurgeCache is
 * available.
 */
static VOID NTAPI vbsfNtWriteCcFlushCache(PSECTION_OBJECT_POINTERS pSectObjPtrs, PLARGE_INTEGER poffFlush, ULONG cbFlush,
                                          PIO_STATUS_BLOCK pIos)
{
    if (g_pfnCcCoherencyFlushAndPurgeCache)
        g_pfnCcCoherencyFlushAndPurgeCache(pSectObjPtrs, poffFlush, cbFlush, pIos, 0 /*fFlags*/);
    else
        CcFlushCache(pSectObjPtrs, poffFlush, cbFlush, pIos);
}


/**
 * For writes with mmapping/caching section, called to purge after flushing.
 *
 * We translate this to a no-op when CcCoherencyFlushAndPurgeCache is available.
 */
static BOOLEAN NTAPI vbsfNtWriteCcPurgeCacheSection(PSECTION_OBJECT_POINTERS pSectObjPtrs, PLARGE_INTEGER poffPurge, ULONG cbPurge,
#if (NTDDI_VERSION >= NTDDI_VISTA)
                                                    ULONG fUninitializeCacheMaps)
#else
                                                    BOOLEAN fUninitializeCacheMaps)
#endif
{
#if (NTDDI_VERSION >= NTDDI_VISTA)
    fUninitializeCacheMaps &= 0xff; /* Used to be BOOLEAN before Vista. */
#endif
    Assert(fUninitializeCacheMaps == 0);
    BOOLEAN fRet;
    if (g_pfnCcCoherencyFlushAndPurgeCache)
        fRet = TRUE;
    else
        fRet = CcPurgeCacheSection(pSectObjPtrs, poffPurge, cbPurge, fUninitializeCacheMaps);
    return fRet;
}

extern "C" {
/** This is what read.obj gets instead of __imp_CcFlushCache. */
decltype(CcFlushCache)        *g_pfnRdFlushCache        = vbsfNtReadCcFlushCache;
/** This is what write.obj gets instead of __imp_CcFlushCache. */
decltype(CcFlushCache)        *g_pfnWrFlushCache        = vbsfNtWriteCcFlushCache;
/** This is what write.obj gets instead of __imp_CcPurgeCacheSection. */
decltype(CcPurgeCacheSection) *g_pfnWrPurgeCacheSection = vbsfNtWriteCcPurgeCacheSection;
}

/** @} */
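
/* Resulting call flow (sketch): code in rdbsslib's read.obj that would have
 * called CcFlushCache via __imp_CcFlushCache now calls through
 * g_pfnRdFlushCache -> vbsfNtReadCcFlushCache, which uses
 * CcCoherencyFlushAndPurgeCache(..., CC_FLUSH_AND_PURGE_NO_PURGE) when present
 * and plain CcFlushCache otherwise.  Likewise, write.obj goes through
 * g_pfnWrFlushCache and g_pfnWrPurgeCacheSection, the latter turning the purge
 * into a no-op once CcCoherencyFlushAndPurgeCache has already done both the
 * flush and the purge. */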



/**
 * Performs a read.
 *
 * @note Almost identical to vbsfNtWriteWorker.
 */
static NTSTATUS vbsfNtReadWorker(PRX_CONTEXT RxContext)
{
    RxCaptureFcb;
    RxCaptureFobx;
    PMRX_VBOX_NETROOT_EXTENSION pNetRootX  = VBoxMRxGetNetRootExtension(capFcb->pNetRoot);
    PVBSFNTFCBEXT               pVBoxFcbX  = VBoxMRxGetFcbExtension(capFcb);
    PMRX_VBOX_FOBX              pVBoxFobX  = VBoxMRxGetFileObjectExtension(capFobx);
    PMDL                        pBufferMdl = RxContext->LowIoContext.ParamsFor.ReadWrite.Buffer;

    LogFlow(("vbsfNtReadWorker: hFile=%#RX64 offFile=%#RX64 cbToRead=%#x %s\n", pVBoxFobX->hFile,
             RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset, RxContext->LowIoContext.ParamsFor.ReadWrite.ByteCount,
             RxContext->Flags & RX_CONTEXT_FLAG_ASYNC_OPERATION ? " async" : "sync"));

    AssertReturn(pBufferMdl, STATUS_INTERNAL_ERROR);


    /*
     * We should never get a zero byte request (RDBSS checks), but in case we
     * do, it should succeed.
     */
    uint32_t cbRet  = 0;
    uint32_t cbLeft = RxContext->LowIoContext.ParamsFor.ReadWrite.ByteCount;
    AssertReturnStmt(cbLeft > 0, RxContext->InformationToReturn = 0, STATUS_SUCCESS);

    Assert(cbLeft <= MmGetMdlByteCount(pBufferMdl));

    /*
     * Allocate a request buffer.
     */
    uint32_t cPagesLeft = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(pBufferMdl), cbLeft);
    uint32_t cMaxPages  = RT_MIN(cPagesLeft, VBSF_MAX_IO_PAGES);
    VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
                                                                                          PgLst.aPages[cMaxPages]));
    while (!pReq && cMaxPages > 4)
    {
        cMaxPages /= 2;
        pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
    }
    NTSTATUS rcNt = STATUS_SUCCESS;
    if (pReq)
    {
        /*
         * The read loop.
         */
        RTFOFF      offFile = RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset;
        PPFN_NUMBER paPfns  = MmGetMdlPfnArray(pBufferMdl);
        uint32_t    offPage = MmGetMdlByteOffset(pBufferMdl);
        if (offPage < PAGE_SIZE)
        { /* likely */ }
        else
        {
            paPfns  += offPage >> PAGE_SHIFT;
            offPage &= PAGE_OFFSET_MASK;
        }

        for (;;)
        {
            /*
             * Figure out how much to process now and set up the page list for it.
             */
            uint32_t cPagesInChunk;
            uint32_t cbChunk;
            if (cPagesLeft <= cMaxPages)
            {
                cPagesInChunk = cPagesLeft;
                cbChunk       = cbLeft;
            }
            else
            {
                cPagesInChunk = cMaxPages;
                cbChunk       = (cMaxPages << PAGE_SHIFT) - offPage;
            }

            size_t iPage = cPagesInChunk;
            while (iPage-- > 0)
                pReq->PgLst.aPages[iPage] = (RTGCPHYS)paPfns[iPage] << PAGE_SHIFT;
            pReq->PgLst.offFirstPage = offPage;
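            /* The page list now describes this chunk: each MDL PFN was converted to a
               guest-physical byte address (PFN << PAGE_SHIFT) and offFirstPage holds the
               byte offset into the first page, which is why a full chunk is
               (cMaxPages << PAGE_SHIFT) - offPage bytes.  (vbsfNtWriteWorker below builds
               its page list the same way.) */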
#if 0 /* Instead we hook into read.obj's import function pointers to do this more efficiently. */
            /*
             * Flush dirty cache content before we try to read it from the host.  RDBSS calls
             * CcFlushCache before it calls us, I think, but CcCoherencyFlushAndPurgeCache
             * does the right thing whereas CcFlushCache clearly doesn't (FsPerf mmap+read
             * coherency test fails consistently on W10, XP, ++).
             */
            if (   g_pfnCcCoherencyFlushAndPurgeCache
                && !(RxContext->CurrentIrp && (RxContext->CurrentIrp->Flags & IRP_PAGING_IO))
                && RxContext->NonPagedFcb != NULL
                && RxContext->NonPagedFcb->SectionObjectPointers.DataSectionObject != NULL)
            {
                LARGE_INTEGER offFlush;
                offFlush.QuadPart = offFile;
                Assert(!RxContext->FcbPagingIoResourceAcquired);
                BOOLEAN AcquiredFile = RxAcquirePagingIoResourceShared(NULL, capFcb, 1 /*fWait*/);
                g_pfnCcCoherencyFlushAndPurgeCache(&RxContext->NonPagedFcb->SectionObjectPointers, &offFlush, cbChunk,
                                                   &RxContext->CurrentIrp->IoStatus, CC_FLUSH_AND_PURGE_NO_PURGE);
                if (AcquiredFile)
                { RxReleasePagingIoResource(NULL, capFcb); /* requires {} */ }
            }
#endif

            /*
             * Issue the request and unlock the pages.
             */
            int vrc = VbglR0SfHostReqReadPgLst(pNetRootX->map.root, pReq, pVBoxFobX->hFile, offFile, cbChunk, cPagesInChunk);
            if (RT_SUCCESS(vrc))
            {
                /*
                 * Success, advance position and buffer.
                 */
                uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
                AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
                cbRet   += cbActual;
                offFile += cbActual;
                cbLeft  -= cbActual;

                /*
                 * Update timestamp state (FCB is shared).
                 */
                pVBoxFobX->fTimestampsImplicitlyUpdated |= VBOX_FOBX_F_INFO_LASTACCESS_TIME;
                if (pVBoxFcbX->pFobxLastAccessTime != pVBoxFobX)
                    pVBoxFcbX->pFobxLastAccessTime = NULL;

                /*
                 * Are we done already?
                 */
                if (!cbLeft || cbActual < cbChunk)
                {
                    /*
                     * Flag EOF.
                     */
                    if (cbActual != 0 || cbRet != 0)
                    { /* typical */ }
                    else
                        rcNt = STATUS_END_OF_FILE;

                    /*
                     * See if we've reached the EOF early or read beyond what we thought was the EOF.
                     *
                     * Note! We don't dare do this (yet) if we're in paging I/O as we then hold the
                     *       PagingIoResource in shared mode and would probably deadlock in the
                     *       updating code when taking the lock in exclusive mode.
                     */
                    if (RxContext->LowIoContext.Resource != capFcb->Header.PagingIoResource)
                    {
                        LONGLONG cbFileRdbss;
                        RxGetFileSizeWithLock((PFCB)capFcb, &cbFileRdbss);
                        if (   offFile < cbFileRdbss
                            && cbActual < cbChunk /* hit EOF */)
                            vbsfNtUpdateFcbSize(RxContext->pFobx->AssociatedFileObject, capFcb, pVBoxFobX, offFile, cbFileRdbss, -1);
                        else if (offFile > cbFileRdbss)
                            vbsfNtQueryAndUpdateFcbSize(pNetRootX, RxContext->pFobx->AssociatedFileObject,
                                                        pVBoxFobX, capFcb, pVBoxFcbX);
                    }
                    break;
                }

                /*
                 * More to read, advance page related variables and loop.
                 */
                paPfns     += cPagesInChunk;
                cPagesLeft -= cPagesInChunk;
                offPage     = 0;
            }
            else if (vrc == VERR_NO_MEMORY && cMaxPages > 4)
            {
                /*
                 * The host probably doesn't have enough heap to handle the
                 * request, reduce the page count and retry.
                 */
                cMaxPages /= 4;
                Assert(cMaxPages > 0);
            }
            else
            {
                /*
                 * If we've successfully read stuff, return it rather than
                 * the error.  (Not sure if this is such a great idea...)
                 */
                if (cbRet > 0)
                    Log(("vbsfNtReadWorker: read at %#RX64 -> %Rrc; got cbRet=%#zx already\n", offFile, vrc, cbRet));
                else
                {
                    rcNt = vbsfNtVBoxStatusToNt(vrc);
                    Log(("vbsfNtReadWorker: read at %#RX64 -> %Rrc (rcNt=%#x)\n", offFile, vrc, rcNt));
                }
                break;
            }

        }

        VbglR0PhysHeapFree(pReq);
    }
    else
        rcNt = STATUS_INSUFFICIENT_RESOURCES;
    RxContext->InformationToReturn = cbRet;
    LogFlow(("vbsfNtReadWorker: returns %#x cbRet=%#x @ %#RX64\n",
             rcNt, cbRet, RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset));
    return rcNt;
}

/**
 * Wrapper for RxDispatchToWorkerThread().
 */
static VOID vbsfNtReadThreadWorker(VOID *pv)
{
    PRX_CONTEXT RxContext = (PRX_CONTEXT)pv;

    Log(("VBOXSF: vbsfNtReadThreadWorker: calling the worker\n"));

    RxContext->IoStatusBlock.Status = vbsfNtReadWorker(RxContext);

    Log(("VBOXSF: vbsfNtReadThreadWorker: Status 0x%08X\n",
         RxContext->IoStatusBlock.Status));

    RxLowIoCompletion(RxContext);
}

/**
 * Read stuff from a file.
 *
 * Prior to calling us, RDBSS will have:
 *  - Called CcFlushCache() for uncached accesses.
 *  - For non-paging access, taken the Fcb.Header.Resource lock in shared mode
 *    in one way or another (ExAcquireResourceSharedLite,
 *    ExAcquireSharedWaitForExclusive).
 *  - For paging I/O the FCB resource isn't taken, but the
 *    Fcb.Header.PagingIoResource is acquired in shared mode
 *    (ExAcquireResourceSharedLite).
 *
 * Upon completion, it will update the file pointer if applicable.  There are no
 * EOF checks and no corresponding file size updating like in the write case, so
 * that's something we seemingly have to do ourselves, since the library relies
 * on the size information being accurate in a few places (set EOF, cached reads).
 */
NTSTATUS VBoxMRxRead(IN PRX_CONTEXT RxContext)
{
    NTSTATUS Status;

    /* If synchronous operation, keep it on this thread (RDBSS already checked
       if we've got enough stack before calling us). */
    if (!(RxContext->Flags & RX_CONTEXT_FLAG_ASYNC_OPERATION))
    {
        RxContext->IoStatusBlock.Status = Status = vbsfNtReadWorker(RxContext);
        Assert(Status != STATUS_PENDING);

        Log(("VBOXSF: VBoxMRxRead: vbsfNtReadWorker: Status %#08X\n", Status));
    }
    else
    {
        Status = RxDispatchToWorkerThread(VBoxMRxDeviceObject, DelayedWorkQueue, vbsfNtReadThreadWorker, RxContext);

        Log(("VBOXSF: VBoxMRxRead: RxDispatchToWorkerThread: Status 0x%08X\n", Status));

        if (Status == STATUS_SUCCESS)
            Status = STATUS_PENDING;
    }

    return Status;
}

/**
 * Performs a write.
 *
 * @note Almost identical to vbsfNtReadWorker.
 */
static NTSTATUS vbsfNtWriteWorker(PRX_CONTEXT RxContext)
{
    RxCaptureFcb;
    RxCaptureFobx;
    PMRX_VBOX_NETROOT_EXTENSION pNetRootX  = VBoxMRxGetNetRootExtension(capFcb->pNetRoot);
    PVBSFNTFCBEXT               pVBoxFcbX  = VBoxMRxGetFcbExtension(capFcb);
    PMRX_VBOX_FOBX              pVBoxFobX  = VBoxMRxGetFileObjectExtension(capFobx);
    PMDL                        pBufferMdl = RxContext->LowIoContext.ParamsFor.ReadWrite.Buffer;

    LogFlow(("vbsfNtWriteWorker: hFile=%#RX64 offFile=%#RX64 cbToWrite=%#x %s\n", pVBoxFobX->hFile,
             RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset, RxContext->LowIoContext.ParamsFor.ReadWrite.ByteCount,
             RxContext->Flags & RX_CONTEXT_FLAG_ASYNC_OPERATION ? " async" : "sync"));

    AssertReturn(pBufferMdl, STATUS_INTERNAL_ERROR);

    /*
     * We should never get a zero byte request (RDBSS checks), but in case we
     * do, it should succeed.
     */
    uint32_t cbRet  = 0;
    uint32_t cbLeft = RxContext->LowIoContext.ParamsFor.ReadWrite.ByteCount;
    AssertReturnStmt(cbLeft > 0, RxContext->InformationToReturn = 0, STATUS_SUCCESS);

    Assert(cbLeft <= MmGetMdlByteCount(pBufferMdl));

    /*
     * Allocate a request buffer.
     */
    uint32_t cPagesLeft = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(pBufferMdl), cbLeft);
    uint32_t cMaxPages  = RT_MIN(cPagesLeft, VBSF_MAX_IO_PAGES);
    VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ,
                                                                                            PgLst.aPages[cMaxPages]));
    while (!pReq && cMaxPages > 4)
    {
        cMaxPages /= 2;
        pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
    }
    NTSTATUS rcNt = STATUS_SUCCESS;
    if (pReq)
    {
        /*
         * The write loop.
         */
        RTFOFF      offFile = RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset;
        PPFN_NUMBER paPfns  = MmGetMdlPfnArray(pBufferMdl);
        uint32_t    offPage = MmGetMdlByteOffset(pBufferMdl);
        if (offPage < PAGE_SIZE)
        { /* likely */ }
        else
        {
            paPfns  += offPage >> PAGE_SHIFT;
            offPage &= PAGE_OFFSET_MASK;
        }

        for (;;)
        {
            /*
             * Figure out how much to process now and set up the page list for it.
             */
            uint32_t cPagesInChunk;
            uint32_t cbChunk;
            if (cPagesLeft <= cMaxPages)
            {
                cPagesInChunk = cPagesLeft;
                cbChunk       = cbLeft;
            }
            else
            {
                cPagesInChunk = cMaxPages;
                cbChunk       = (cMaxPages << PAGE_SHIFT) - offPage;
            }

            size_t iPage = cPagesInChunk;
            while (iPage-- > 0)
                pReq->PgLst.aPages[iPage] = (RTGCPHYS)paPfns[iPage] << PAGE_SHIFT;
            pReq->PgLst.offFirstPage = offPage;

#if 0 /* Instead we hook into write.obj's import function pointers to do this more efficiently. */
            /*
             * Flush and purge the cache range we're touching upon now, provided we can and
             * really need to.  The CcCoherencyFlushAndPurgeCache API seems to work better
             * than the CcFlushCache + CcPurgeCacheSection that RDBSS does before calling us.
             */
            if (   g_pfnCcCoherencyFlushAndPurgeCache
                && !(RxContext->CurrentIrp && (RxContext->CurrentIrp->Flags & IRP_PAGING_IO))
                && RxContext->NonPagedFcb != NULL
                && RxContext->NonPagedFcb->SectionObjectPointers.DataSectionObject != NULL)
            {
                LARGE_INTEGER offFlush;
                offFlush.QuadPart = offFile;
                BOOLEAN fAcquiredLock = RxAcquirePagingIoResource(NULL, capFcb);
                g_pfnCcCoherencyFlushAndPurgeCache(&RxContext->NonPagedFcb->SectionObjectPointers, &offFlush, cbChunk,
                                                   &RxContext->CurrentIrp->IoStatus, 0 /*fFlags*/);
                if (fAcquiredLock)
                { RxReleasePagingIoResource(NULL, capFcb); /* requires {} */ }
            }
#endif

            /*
             * Issue the request and unlock the pages.
             */
            int vrc = VbglR0SfHostReqWritePgLst(pNetRootX->map.root, pReq, pVBoxFobX->hFile, offFile, cbChunk, cPagesInChunk);
            if (RT_SUCCESS(vrc))
            {
                /*
                 * Success, advance position and buffer.
                 */
                uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
                AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
                cbRet   += cbActual;
                offFile += cbActual;
                cbLeft  -= cbActual;

                /*
                 * Update timestamp state (FCB is shared).
                 */
                pVBoxFobX->fTimestampsImplicitlyUpdated |= VBOX_FOBX_F_INFO_LASTWRITE_TIME;
                if (pVBoxFcbX->pFobxLastWriteTime != pVBoxFobX)
                    pVBoxFcbX->pFobxLastWriteTime = NULL;

                /*
                 * Are we done already?
                 */
                if (!cbLeft || cbActual < cbChunk)
                {
                    /*
                     * Make sure our cached file size value is up to date (RDBSS takes care
                     * of the ones in the FCB as well as the cache manager).
                     */
                    if (cbRet > 0)
                    {
                        if (pVBoxFobX->Info.cbObject < offFile)
                            pVBoxFobX->Info.cbObject = offFile;

                        if (pVBoxFobX->Info.cbAllocated < offFile)
                        {
                            pVBoxFobX->Info.cbAllocated = offFile;
                            pVBoxFobX->nsUpToDate = 0;
                        }
                    }
                    break;
                }

                /*
                 * More to write, advance page related variables and loop.
                 */
                paPfns     += cPagesInChunk;
                cPagesLeft -= cPagesInChunk;
                offPage     = 0;
            }
            else if (vrc == VERR_NO_MEMORY && cMaxPages > 4)
            {
                /*
                 * The host probably doesn't have enough heap to handle the
                 * request, reduce the page count and retry.
                 */
                cMaxPages /= 4;
                Assert(cMaxPages > 0);
            }
            else
            {
                /*
                 * If we've successfully written stuff, return it rather than
                 * the error.  (Not sure if this is such a great idea...)
                 */
                if (cbRet > 0)
                    Log(("vbsfNtWriteWorker: write at %#RX64 -> %Rrc; got cbRet=%#zx already\n", offFile, vrc, cbRet));
                else
                {
                    rcNt = vbsfNtVBoxStatusToNt(vrc);
                    Log(("vbsfNtWriteWorker: write at %#RX64 -> %Rrc (rcNt=%#x)\n", offFile, vrc, rcNt));
                }
                break;
            }

        }

        VbglR0PhysHeapFree(pReq);
    }
    else
        rcNt = STATUS_INSUFFICIENT_RESOURCES;
    RxContext->InformationToReturn = cbRet;
    LogFlow(("vbsfNtWriteWorker: returns %#x cbRet=%#x @ %#RX64\n",
             rcNt, cbRet, RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset));
    return rcNt;
}

/**
 * Wrapper for RxDispatchToWorkerThread().
 */
static VOID vbsfNtWriteThreadWorker(VOID *pv)
{
    PRX_CONTEXT RxContext = (PRX_CONTEXT)pv;

    Log(("VBOXSF: vbsfNtWriteThreadWorker: calling the worker\n"));

    RxContext->IoStatusBlock.Status = vbsfNtWriteWorker(RxContext);

    Log(("VBOXSF: vbsfNtWriteThreadWorker: Status 0x%08X\n",
         RxContext->IoStatusBlock.Status));

    RxLowIoCompletion(RxContext);
}

NTSTATUS VBoxMRxWrite(IN PRX_CONTEXT RxContext)
{
    NTSTATUS Status;

    /* If synchronous operation, keep it on this thread (RDBSS already checked
       if we've got enough stack before calling us). */
    if (!(RxContext->Flags & RX_CONTEXT_FLAG_ASYNC_OPERATION))
    {
        RxContext->IoStatusBlock.Status = Status = vbsfNtWriteWorker(RxContext);
        Assert(Status != STATUS_PENDING);

        Log(("VBOXSF: VBoxMRxWrite: vbsfNtWriteWorker: Status %#08X\n", Status));
    }
    else
    {
        Status = RxDispatchToWorkerThread(VBoxMRxDeviceObject, DelayedWorkQueue, vbsfNtWriteThreadWorker, RxContext);

        Log(("VBOXSF: VBoxMRxWrite: RxDispatchToWorkerThread: Status 0x%08X\n", Status));

        if (Status == STATUS_SUCCESS)
            Status = STATUS_PENDING;
    }

    return Status;
}


NTSTATUS VBoxMRxLocks(IN PRX_CONTEXT RxContext)
{
    NTSTATUS Status = STATUS_SUCCESS;

    RxCaptureFcb;
    RxCaptureFobx;

    PMRX_VBOX_NETROOT_EXTENSION pNetRootExtension = VBoxMRxGetNetRootExtension(capFcb->pNetRoot);
    PMRX_VBOX_FOBX pVBoxFobx = VBoxMRxGetFileObjectExtension(capFobx);

    PLOWIO_CONTEXT LowIoContext = &RxContext->LowIoContext;
    uint32_t fu32Lock = 0;
    int vrc;

    Log(("VBOXSF: MRxLocks: Operation %d\n",
         LowIoContext->Operation));

    switch (LowIoContext->Operation)
    {
        default:
            AssertMsgFailed(("VBOXSF: MRxLocks: Unsupported lock/unlock type %d detected!\n",
                             LowIoContext->Operation));
            return STATUS_NOT_IMPLEMENTED;

        case LOWIO_OP_UNLOCK_MULTIPLE:
            /** @todo Remove multiple locks listed in LowIoContext.ParamsFor.Locks.LockList. */
            Log(("VBOXSF: MRxLocks: Unsupported LOWIO_OP_UNLOCK_MULTIPLE!\n"));
            return STATUS_NOT_IMPLEMENTED;

        case LOWIO_OP_SHAREDLOCK:
            fu32Lock = SHFL_LOCK_SHARED | SHFL_LOCK_PARTIAL;
            break;

        case LOWIO_OP_EXCLUSIVELOCK:
            fu32Lock = SHFL_LOCK_EXCLUSIVE | SHFL_LOCK_PARTIAL;
            break;

        case LOWIO_OP_UNLOCK:
            fu32Lock = SHFL_LOCK_CANCEL | SHFL_LOCK_PARTIAL;
            break;
    }

    if (LowIoContext->ParamsFor.Locks.Flags & LOWIO_LOCKSFLAG_FAIL_IMMEDIATELY)
        fu32Lock |= SHFL_LOCK_NOWAIT;
    else
        fu32Lock |= SHFL_LOCK_WAIT;

    vrc = VbglR0SfLock(&g_SfClient, &pNetRootExtension->map, pVBoxFobx->hFile,
                       LowIoContext->ParamsFor.Locks.ByteOffset, LowIoContext->ParamsFor.Locks.Length, fu32Lock);

    Status = vbsfNtVBoxStatusToNt(vrc);

    Log(("VBOXSF: MRxLocks: Returned 0x%08X\n", Status));
    return Status;
}

NTSTATUS VBoxMRxCompleteBufferingStateChangeRequest(IN OUT PRX_CONTEXT RxContext, IN OUT PMRX_SRV_OPEN SrvOpen,
                                                    IN PVOID pvContext)
{
    RT_NOREF(RxContext, SrvOpen, pvContext);
    Log(("VBOXSF: MRxCompleteBufferingStateChangeRequest: not implemented\n"));
    return STATUS_NOT_IMPLEMENTED;
}

NTSTATUS VBoxMRxFlush(IN PRX_CONTEXT RxContext)
{
    NTSTATUS Status = STATUS_SUCCESS;

    RxCaptureFcb;
    RxCaptureFobx;

    PMRX_VBOX_NETROOT_EXTENSION pNetRootExtension = VBoxMRxGetNetRootExtension(capFcb->pNetRoot);
    PMRX_VBOX_FOBX pVBoxFobx = VBoxMRxGetFileObjectExtension(capFobx);

    int vrc;

    Log(("VBOXSF: MRxFlush\n"));

    /* Do the actual flushing of file buffers */
    vrc = VbglR0SfFlush(&g_SfClient, &pNetRootExtension->map, pVBoxFobx->hFile);

    Status = vbsfNtVBoxStatusToNt(vrc);

    Log(("VBOXSF: MRxFlush: Returned 0x%08X\n", Status));
    return Status;
}

/** See PMRX_EXTENDFILE_CALLDOWN in ddk/mrx.h
 *
 * Documentation says it returns STATUS_SUCCESS on success and an error
 * status on failure, so the ULONG return type is probably just a typo that
 * stuck.
 */
ULONG NTAPI VBoxMRxExtendStub(IN OUT struct _RX_CONTEXT * RxContext, IN OUT PLARGE_INTEGER pNewFileSize,
                              OUT PLARGE_INTEGER pNewAllocationSize)
{
    RT_NOREF(RxContext);

    /* Note: On Windows hosts vbsfNtSetEndOfFile returns ACCESS_DENIED if the file has been
     *       opened in APPEND mode. Writes to a file will extend it anyway, therefore it is
     *       better to not call the host at all and tell the caller that the file was extended.
     */
    Log(("VBOXSF: MRxExtendStub: new size = %RX64\n",
         pNewFileSize->QuadPart));

    pNewAllocationSize->QuadPart = pNewFileSize->QuadPart;

    return STATUS_SUCCESS;
}