[40900] | 1 | /* $Id: regops.c 88716 2021-04-26 20:41:51Z vboxsync $ */
|
---|
[2614] | 2 | /** @file
|
---|
[76744] | 3 | * vboxsf - VBox Linux Shared Folders VFS, regular file inode and file operations.
|
---|
[2614] | 4 | */
|
---|
| 5 |
|
---|
| 6 | /*
|
---|
[82968] | 7 | * Copyright (C) 2006-2020 Oracle Corporation
|
---|
[2614] | 8 | *
|
---|
[72627] | 9 | * Permission is hereby granted, free of charge, to any person
|
---|
| 10 | * obtaining a copy of this software and associated documentation
|
---|
| 11 | * files (the "Software"), to deal in the Software without
|
---|
| 12 | * restriction, including without limitation the rights to use,
|
---|
| 13 | * copy, modify, merge, publish, distribute, sublicense, and/or sell
|
---|
| 14 | * copies of the Software, and to permit persons to whom the
|
---|
| 15 | * Software is furnished to do so, subject to the following
|
---|
| 16 | * conditions:
|
---|
| 17 | *
|
---|
| 18 | * The above copyright notice and this permission notice shall be
|
---|
| 19 | * included in all copies or substantial portions of the Software.
|
---|
| 20 | *
|
---|
| 21 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
---|
| 22 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
---|
| 23 | * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
---|
| 24 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
---|
| 25 | * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
---|
| 26 | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
---|
| 27 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
---|
| 28 | * OTHER DEALINGS IN THE SOFTWARE.
|
---|
[2614] | 29 | */
|
---|
| 30 |
|
---|
[77626] | 31 |
|
---|
| 32 | /*********************************************************************************************************************************
|
---|
| 33 | * Header Files *
|
---|
| 34 | *********************************************************************************************************************************/
|
---|
[4729] | 35 | #include "vfsmod.h"
|
---|
[77419] | 36 | #include <linux/uio.h>
|
---|
[85698] | 37 | #if RTLNX_VER_MIN(2,5,32)
|
---|
[77419] | 38 | # include <linux/aio.h> /* struct kiocb before 4.1 */
|
---|
| 39 | #endif
|
---|
[85698] | 40 | #if RTLNX_VER_MIN(2,5,12)
|
---|
[77419] | 41 | # include <linux/buffer_head.h>
|
---|
| 42 | #endif
|
---|
[85698] | 43 | #if RTLNX_VER_RANGE(2,5,12, 2,6,31)
|
---|
[77419] | 44 | # include <linux/writeback.h>
|
---|
| 45 | #endif
|
---|
[85698] | 46 | #if RTLNX_VER_RANGE(2,6,23, 3,16,0)
|
---|
[77419] | 47 | # include <linux/splice.h>
|
---|
| 48 | #endif
|
---|
[85698] | 49 | #if RTLNX_VER_RANGE(2,6,17, 2,6,23)
|
---|
[77940] | 50 | # include <linux/pipe_fs_i.h>
|
---|
| 51 | #endif
|
---|
[85698] | 52 | #if RTLNX_VER_MIN(2,4,10)
|
---|
[77770] | 53 | # include <linux/swap.h> /* for mark_page_accessed */
|
---|
| 54 | #endif
|
---|
[77439] | 55 | #include <iprt/err.h>
|
---|
[4729] | 56 |
|
---|
[85698] | 57 | #if RTLNX_VER_MAX(2,6,18)
|
---|
[77419] | 58 | # define SEEK_END 2
|
---|
| 59 | #endif
|
---|
| 60 |
|
---|
[85698] | 61 | #if RTLNX_VER_MAX(3,16,0)
|
---|
[77873] | 62 | # define iter_is_iovec(a_pIter) ( !((a_pIter)->type & ITER_KVEC) )
|
---|
[85698] | 63 | #elif RTLNX_VER_MAX(3,19,0)
|
---|
[77706] | 64 | # define iter_is_iovec(a_pIter) ( !((a_pIter)->type & (ITER_KVEC | ITER_BVEC)) )
|
---|
| 65 | #endif
|
---|
[77419] | 66 |
|
---|
[85698] | 67 | #if RTLNX_VER_MAX(4,17,0)
|
---|
[77731] | 68 | # define vm_fault_t int
|
---|
| 69 | #endif
|
---|
[77706] | 70 |
|
---|
[85698] | 71 | #if RTLNX_VER_MAX(2,5,20)
|
---|
[77770] | 72 | # define pgoff_t unsigned long
|
---|
| 73 | #endif
|
---|
[77731] | 74 |
|
---|
[85698] | 75 | #if RTLNX_VER_MAX(2,5,12)
|
---|
[77966] | 76 | # define PageUptodate(a_pPage) Page_Uptodate(a_pPage)
|
---|
| 77 | #endif
|
---|
[77770] | 78 |
|
---|
[77966] | 79 |
|
---|
[77626] | 80 | /*********************************************************************************************************************************
|
---|
| 81 | * Structures and Typedefs *
|
---|
| 82 | *********************************************************************************************************************************/
|
---|
[85698] | 83 | #if RTLNX_VER_MAX(3,16,0)
|
---|
[77873] | 84 | struct vbsf_iov_iter {
|
---|
| 85 | unsigned int type;
|
---|
| 86 | unsigned int v_write : 1;
|
---|
| 87 | size_t iov_offset;
|
---|
| 88 | size_t nr_segs;
|
---|
| 89 | struct iovec const *iov;
|
---|
| 90 | # ifdef VBOX_STRICT
|
---|
[77880] | 91 | struct iovec const *iov_org;
|
---|
| 92 | size_t nr_segs_org;
|
---|
[77873] | 93 | # endif
|
---|
| 94 | };
|
---|
| 95 | # ifdef VBOX_STRICT
|
---|
[77949] | 96 | # define VBSF_IOV_ITER_INITIALIZER(a_cSegs, a_pIov, a_fWrite) \
|
---|
| 97 | { vbsf_iov_iter_detect_type(a_pIov, a_cSegs), a_fWrite, 0, a_cSegs, a_pIov, a_pIov, a_cSegs }
|
---|
[77873] | 98 | # else
|
---|
[77949] | 99 | # define VBSF_IOV_ITER_INITIALIZER(a_cSegs, a_pIov, a_fWrite) \
|
---|
| 100 | { vbsf_iov_iter_detect_type(a_pIov, a_cSegs), a_fWrite, 0, a_cSegs, a_pIov }
|
---|
[77873] | 101 | # endif
|
---|
| 102 | # define ITER_KVEC 1
|
---|
| 103 | # define iov_iter vbsf_iov_iter
|
---|
| 104 | #endif
|
---|
| 105 |
|
---|
[85698] | 106 | #if RTLNX_VER_MIN(2,6,19)
|
---|
[77626] | 107 | /** Used by vbsf_iter_lock_pages() to keep the first page of the next segment. */
|
---|
| 108 | struct vbsf_iter_stash {
|
---|
| 109 | struct page *pPage;
|
---|
| 110 | size_t off;
|
---|
| 111 | size_t cb;
|
---|
[85698] | 112 | # if RTLNX_VER_MAX(4,11,0)
|
---|
[77626] | 113 | size_t offFromEnd;
|
---|
| 114 | struct iov_iter Copy;
|
---|
| 115 | # endif
|
---|
| 116 | };
|
---|
| 117 | #endif /* >= 3.16.0 */
|
---|
| 118 | /** Initializer for struct vbsf_iter_stash. */
|
---|
[85698] | 119 | #if RTLNX_VER_MIN(4,11,0)
|
---|
[77626] | 120 | # define VBSF_ITER_STASH_INITIALIZER { NULL, 0 }
|
---|
| 121 | #else
|
---|
| 122 | # define VBSF_ITER_STASH_INITIALIZER { NULL, 0, ~(size_t)0 }
|
---|
| 123 | #endif
|
---|
| 124 |
|
---|
| 125 |
|
---|
[77873] | 126 | /*********************************************************************************************************************************
|
---|
| 127 | * Internal Functions *
|
---|
| 128 | *********************************************************************************************************************************/
|
---|
[77944] | 129 | DECLINLINE(void) vbsf_put_page(struct page *pPage);
|
---|
[77966] | 130 | static void vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack);
|
---|
[77943] | 131 | static void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
|
---|
| 132 | uint8_t const *pbSrcBuf, struct page **papSrcPages,
|
---|
| 133 | uint32_t offSrcPage, size_t cSrcPages);
|
---|
[77626] | 134 |
|
---|
[77873] | 135 |
|
---|
| 136 | /*********************************************************************************************************************************
|
---|
| 137 | * Provide more recent uio.h functionality to older kernels. *
|
---|
| 138 | *********************************************************************************************************************************/
|
---|
[85698] | 139 | #if RTLNX_VER_RANGE(2,6,19, 3,16,0)
|
---|
[77873] | 140 |
|
---|
[77949] | 141 | /**
|
---|
| 142 | * Detects the vector type.
|
---|
| 143 | */
|
---|
| 144 | static int vbsf_iov_iter_detect_type(struct iovec const *paIov, size_t cSegs)
|
---|
| 145 | {
|
---|
| 146 | /* Check the first segment with a non-zero length. */
|
---|
| 147 | while (cSegs-- > 0) {
|
---|
| 148 | if (paIov->iov_len > 0) {
|
---|
| 149 | if (access_ok(VERIFY_READ, paIov->iov_base, paIov->iov_len))
|
---|
[87053] | 150 | #if RTLNX_VER_MIN(5,10,0)
|
---|
| 151 | return (uintptr_t)paIov->iov_base >= TASK_SIZE_MAX ? ITER_KVEC : 0;
|
---|
| 152 | #else
|
---|
[77949] | 153 | return (uintptr_t)paIov->iov_base >= USER_DS.seg ? ITER_KVEC : 0;
|
---|
[87053] | 154 | #endif
|
---|
[77949] | 155 | AssertMsgFailed(("%p LB %#zx\n", paIov->iov_base, paIov->iov_len));
|
---|
| 156 | break;
|
---|
| 157 | }
|
---|
| 158 | paIov++;
|
---|
| 159 | }
|
---|
| 160 | return 0;
|
---|
| 161 | }
|
---|
| 162 |
|
---|
| 163 |
|
---|
[77873] | 164 | # undef iov_iter_count
|
---|
| 165 | # define iov_iter_count(a_pIter) vbsf_iov_iter_count(a_pIter)
|
---|
| 166 | static size_t vbsf_iov_iter_count(struct vbsf_iov_iter const *iter)
|
---|
| 167 | {
|
---|
| 168 | size_t cbRet = 0;
|
---|
| 169 | size_t cLeft = iter->nr_segs;
|
---|
| 170 | struct iovec const *iov = iter->iov;
|
---|
| 171 | while (cLeft-- > 0) {
|
---|
| 172 | cbRet += iov->iov_len;
|
---|
| 173 | iov++;
|
---|
| 174 | }
|
---|
| 175 | return cbRet - iter->iov_offset;
|
---|
| 176 | }
|
---|
| 177 |
|
---|
| 178 |
|
---|
| 179 | # undef iov_iter_single_seg_count
|
---|
| 180 | # define iov_iter_single_seg_count(a_pIter) vbsf_iov_iter_single_seg_count(a_pIter)
|
---|
| 181 | static size_t vbsf_iov_iter_single_seg_count(struct vbsf_iov_iter const *iter)
|
---|
| 182 | {
|
---|
| 183 | if (iter->nr_segs > 0)
|
---|
| 184 | return iter->iov->iov_len - iter->iov_offset;
|
---|
| 185 | return 0;
|
---|
| 186 | }
|
---|
| 187 |
|
---|
| 188 |
|
---|
| 189 | # undef iov_iter_advance
|
---|
| 190 | # define iov_iter_advance(a_pIter, a_cbSkip) vbsf_iov_iter_advance(a_pIter, a_cbSkip)
|
---|
| 191 | static void vbsf_iov_iter_advance(struct vbsf_iov_iter *iter, size_t cbSkip)
|
---|
| 192 | {
|
---|
| 193 | SFLOG2(("vbsf_iov_iter_advance: cbSkip=%#zx\n", cbSkip));
|
---|
| 194 | if (iter->nr_segs > 0) {
|
---|
| 195 | size_t const cbLeftCur = iter->iov->iov_len - iter->iov_offset;
|
---|
| 196 | Assert(iter->iov_offset <= iter->iov->iov_len);
|
---|
| 197 | if (cbLeftCur > cbSkip) {
|
---|
| 198 | iter->iov_offset += cbSkip;
|
---|
| 199 | } else {
|
---|
| 200 | cbSkip -= cbLeftCur;
|
---|
| 201 | iter->iov_offset = 0;
|
---|
| 202 | iter->iov++;
|
---|
| 203 | iter->nr_segs--;
|
---|
| 204 | while (iter->nr_segs > 0) {
|
---|
| 205 | size_t const cbSeg = iter->iov->iov_len;
|
---|
| 206 | if (cbSeg > cbSkip) {
|
---|
| 207 | iter->iov_offset = cbSkip;
|
---|
| 208 | break;
|
---|
| 209 | }
|
---|
| 210 | cbSkip -= cbSeg;
|
---|
| 211 | iter->iov++;
|
---|
| 212 | iter->nr_segs--;
|
---|
| 213 | }
|
---|
| 214 | }
|
---|
| 215 | }
|
---|
| 216 | }
|
---|
| 217 |
|
---|
| 218 |
|
---|
| 219 | # undef iov_iter_get_pages
|
---|
| 220 | # define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \
|
---|
| 221 | vbsf_iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0)
|
---|
| 222 | static ssize_t vbsf_iov_iter_get_pages(struct vbsf_iov_iter *iter, struct page **papPages,
|
---|
| 223 | size_t cbMax, unsigned cMaxPages, size_t *poffPg0)
|
---|
| 224 | {
|
---|
| 225 | while (iter->nr_segs > 0) {
|
---|
| 226 | size_t const cbLeft = iter->iov->iov_len - iter->iov_offset;
|
---|
| 227 | Assert(iter->iov->iov_len >= iter->iov_offset);
|
---|
| 228 | if (cbLeft > 0) {
|
---|
| 229 | uintptr_t uPtrFrom = (uintptr_t)iter->iov->iov_base + iter->iov_offset;
|
---|
| 230 | size_t offPg0 = *poffPg0 = uPtrFrom & PAGE_OFFSET_MASK;
|
---|
| 231 | size_t cPagesLeft = RT_ALIGN_Z(offPg0 + cbLeft, PAGE_SIZE) >> PAGE_SHIFT;
|
---|
| 232 | size_t cPages = RT_MIN(cPagesLeft, cMaxPages);
|
---|
| 233 | struct task_struct *pTask = current;
|
---|
| 234 | size_t cPagesLocked;
|
---|
| 235 |
|
---|
| 236 | down_read(&pTask->mm->mmap_sem);
|
---|
| 237 | cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, iter->v_write, 1 /*force*/, papPages, NULL);
|
---|
| 238 | up_read(&pTask->mm->mmap_sem);
|
---|
| 239 | if (cPagesLocked == cPages) {
|
---|
| 240 | size_t cbRet = (cPages << PAGE_SHIFT) - offPg0;
|
---|
| 241 | if (cPages == cPagesLeft) {
|
---|
| 242 | size_t offLastPg = (uPtrFrom + cbLeft) & PAGE_OFFSET_MASK;
|
---|
| 243 | if (offLastPg)
|
---|
| 244 | cbRet -= PAGE_SIZE - offLastPg;
|
---|
| 245 | }
|
---|
| 246 | Assert(cbRet <= cbLeft);
|
---|
| 247 | return cbRet;
|
---|
| 248 | }
|
---|
| 249 | if (cPagesLocked > 0)
|
---|
| 250 | vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/);
|
---|
| 251 | return -EFAULT;
|
---|
| 252 | }
|
---|
| 253 | iter->iov_offset = 0;
|
---|
| 254 | iter->iov++;
|
---|
| 255 | iter->nr_segs--;
|
---|
| 256 | }
|
---|
| 257 | AssertFailed();
|
---|
| 258 | return 0;
|
---|
| 259 | }
|
---|
| 260 |
|
---|
| 261 |
|
---|
| 262 | # undef iov_iter_truncate
|
---|
| 263 | # define iov_iter_truncate(iter, cbNew) vbsf_iov_iter_truncate(iter, cbNew)
|
---|
| 264 | static void vbsf_iov_iter_truncate(struct vbsf_iov_iter *iter, size_t cbNew)
|
---|
| 265 | {
|
---|
| 266 | /* we have no counter or stuff, so it's a no-op. */
|
---|
| 267 | RT_NOREF(iter, cbNew);
|
---|
| 268 | }
|
---|
| 269 |
|
---|
| 270 |
|
---|
| 271 | # undef iov_iter_revert
|
---|
| 272 | # define iov_iter_revert(a_pIter, a_cbRewind) vbsf_iov_iter_revert(a_pIter, a_cbRewind)
|
---|
| 273 | void vbsf_iov_iter_revert(struct vbsf_iov_iter *iter, size_t cbRewind)
|
---|
| 274 | {
|
---|
| 275 | SFLOG2(("vbsf_iov_iter_revert: cbRewind=%#zx\n", cbRewind));
|
---|
| 276 | if (iter->iov_offset > 0) {
|
---|
| 277 | if (cbRewind <= iter->iov_offset) {
|
---|
| 278 | iter->iov_offset -= cbRewind;
|
---|
| 279 | return;
|
---|
| 280 | }
|
---|
| 281 | cbRewind -= iter->iov_offset;
|
---|
| 282 | iter->iov_offset = 0;
|
---|
| 283 | }
|
---|
| 284 |
|
---|
| 285 | while (cbRewind > 0) {
|
---|
| 286 | struct iovec const *pIov = --iter->iov;
|
---|
| 287 | size_t const cbSeg = pIov->iov_len;
|
---|
| 288 | iter->nr_segs++;
|
---|
| 289 |
|
---|
| 290 | Assert((uintptr_t)pIov >= (uintptr_t)iter->iov_org);
|
---|
[77878] | 291 | Assert(iter->nr_segs <= iter->nr_segs_org);
|
---|
[77873] | 292 |
|
---|
| 293 | if (cbRewind <= cbSeg) {
|
---|
| 294 | iter->iov_offset = cbSeg - cbRewind;
|
---|
| 295 | break;
|
---|
| 296 | }
|
---|
| 297 | cbRewind -= cbSeg;
|
---|
| 298 | }
|
---|
| 299 | }
|
---|
| 300 |
|
---|
| 301 | #endif /* 2.6.19 <= linux < 3.16.0 */
|
---|
[85698] | 302 | #if RTLNX_VER_RANGE(3,16,0, 3,16,35)
|
---|
[78139] | 303 |
|
---|
| 304 | /** This is for implementing cMaxPage on 3.16 which doesn't have it. */
|
---|
| 305 | static ssize_t vbsf_iov_iter_get_pages_3_16(struct iov_iter *iter, struct page **papPages,
|
---|
| 306 | size_t cbMax, unsigned cMaxPages, size_t *poffPg0)
|
---|
| 307 | {
|
---|
| 308 | if (!(iter->type & ITER_BVEC)) {
|
---|
| 309 | size_t const offPg0 = iter->iov_offset & PAGE_OFFSET_MASK;
|
---|
| 310 | size_t const cbMaxPages = ((size_t)cMaxPages << PAGE_SHIFT) - offPg0;
|
---|
| 311 | if (cbMax > cbMaxPages)
|
---|
| 312 | cbMax = cbMaxPages;
|
---|
| 313 | }
|
---|
| 314 | /* else: BVEC works a page at a time and shouldn't have much of a problem here. */
|
---|
| 315 | return iov_iter_get_pages(iter, papPages, cbMax, poffPg0);
|
---|
| 316 | }
|
---|
| 317 | # undef iov_iter_get_pages
|
---|
| 318 | # define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \
|
---|
| 319 | vbsf_iov_iter_get_pages_3_16(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0)
|
---|
| 320 |
|
---|
[78185] | 321 | #endif /* 3.16.0-3.16.34 */
|
---|
[85698] | 322 | #if RTLNX_VER_RANGE(2,6,19, 3,18,0)
|
---|
[77873] | 323 |
|
---|
| 324 | static size_t copy_from_iter(uint8_t *pbDst, size_t cbToCopy, struct iov_iter *pSrcIter)
|
---|
| 325 | {
|
---|
| 326 | size_t const cbTotal = cbToCopy;
|
---|
| 327 | Assert(iov_iter_count(pSrcIter) >= cbToCopy);
|
---|
[85698] | 328 | # if RTLNX_VER_MIN(3,16,0)
|
---|
[77873] | 329 | if (pSrcIter->type & ITER_BVEC) {
|
---|
| 330 | while (cbToCopy > 0) {
|
---|
| 331 | size_t const offPage = (uintptr_t)pbDst & PAGE_OFFSET_MASK;
|
---|
| 332 | size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
|
---|
| 333 | struct page *pPage = rtR0MemObjLinuxVirtToPage(pbDst);
|
---|
| 334 | size_t cbCopied = copy_page_from_iter(pPage, offPage, cbThisCopy, pSrcIter);
|
---|
| 335 | AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
|
---|
| 336 | pbDst += cbCopied;
|
---|
| 337 | cbToCopy -= cbCopied;
|
---|
| 338 | if (cbCopied != cbToCopy)
|
---|
| 339 | break;
|
---|
| 340 | }
|
---|
| 341 | } else
|
---|
| 342 | # endif
|
---|
| 343 | {
|
---|
| 344 | while (cbToCopy > 0) {
|
---|
| 345 | size_t cbThisCopy = iov_iter_single_seg_count(pSrcIter);
|
---|
| 346 | if (cbThisCopy > 0) {
|
---|
| 347 | if (cbThisCopy > cbToCopy)
|
---|
| 348 | cbThisCopy = cbToCopy;
|
---|
| 349 | if (pSrcIter->type & ITER_KVEC)
|
---|
| 350 | memcpy(pbDst, (void *)pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy);
|
---|
[80582] | 351 | else if (copy_from_user(pbDst, pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy) != 0)
|
---|
[77873] | 352 | break;
|
---|
| 353 | pbDst += cbThisCopy;
|
---|
| 354 | cbToCopy -= cbThisCopy;
|
---|
| 355 | }
|
---|
| 356 | iov_iter_advance(pSrcIter, cbThisCopy);
|
---|
| 357 | }
|
---|
| 358 | }
|
---|
| 359 | return cbTotal - cbToCopy;
|
---|
| 360 | }
|
---|
| 361 |
|
---|
| 362 |
|
---|
| 363 | static size_t copy_to_iter(uint8_t const *pbSrc, size_t cbToCopy, struct iov_iter *pDstIter)
|
---|
| 364 | {
|
---|
| 365 | size_t const cbTotal = cbToCopy;
|
---|
| 366 | Assert(iov_iter_count(pDstIter) >= cbToCopy);
|
---|
[85698] | 367 | # if RTLNX_VER_MIN(3,16,0)
|
---|
[77873] | 368 | if (pDstIter->type & ITER_BVEC) {
|
---|
| 369 | while (cbToCopy > 0) {
|
---|
| 370 | size_t const offPage = (uintptr_t)pbSrc & PAGE_OFFSET_MASK;
|
---|
| 371 | size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
|
---|
| 372 | struct page *pPage = rtR0MemObjLinuxVirtToPage((void *)pbSrc);
|
---|
| 373 | size_t cbCopied = copy_page_to_iter(pPage, offPage, cbThisCopy, pDstIter);
|
---|
| 374 | AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
|
---|
| 375 | pbSrc += cbCopied;
|
---|
| 376 | cbToCopy -= cbCopied;
|
---|
| 377 | if (cbCopied != cbToCopy)
|
---|
| 378 | break;
|
---|
| 379 | }
|
---|
| 380 | } else
|
---|
| 381 | # endif
|
---|
| 382 | {
|
---|
| 383 | while (cbToCopy > 0) {
|
---|
| 384 | size_t cbThisCopy = iov_iter_single_seg_count(pDstIter);
|
---|
| 385 | if (cbThisCopy > 0) {
|
---|
| 386 | if (cbThisCopy > cbToCopy)
|
---|
| 387 | cbThisCopy = cbToCopy;
|
---|
| 388 | if (pDstIter->type & ITER_KVEC)
|
---|
| 389 | memcpy((void *)pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy);
|
---|
[80582] | 390 | else if (copy_to_user(pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy) != 0) {
|
---|
[77873] | 391 | break;
|
---|
| 392 | }
|
---|
| 393 | pbSrc += cbThisCopy;
|
---|
| 394 | cbToCopy -= cbThisCopy;
|
---|
| 395 | }
|
---|
| 396 | iov_iter_advance(pDstIter, cbThisCopy);
|
---|
| 397 | }
|
---|
| 398 | }
|
---|
| 399 | return cbTotal - cbToCopy;
|
---|
| 400 | }
|
---|
| 401 |
|
---|
| 402 | #endif /* 3.16.0 <= linux < 3.18.0 */
|
---|
| 403 |
|
---|
| 404 |
|
---|
| 405 |
|
---|
| 406 | /*********************************************************************************************************************************
|
---|
| 407 | * Handle management *
|
---|
| 408 | *********************************************************************************************************************************/
|
---|
| 409 |
|
---|
[77458] | 410 | /**
|
---|
| 411 | * Called when an inode is released to unlink all handles that might impossibly
|
---|
| 412 | * still be associated with it.
|
---|
| 413 | *
|
---|
| 414 | * @param pInodeInfo The inode which handles to drop.
|
---|
| 415 | */
|
---|
[77530] | 416 | void vbsf_handle_drop_chain(struct vbsf_inode_info *pInodeInfo)
|
---|
[77458] | 417 | {
|
---|
[77536] | 418 | struct vbsf_handle *pCur, *pNext;
|
---|
[77526] | 419 | unsigned long fSavedFlags;
|
---|
[77529] | 420 | SFLOGFLOW(("vbsf_handle_drop_chain: %p\n", pInodeInfo));
|
---|
[77526] | 421 | spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
|
---|
[2614] | 422 |
|
---|
[77536] | 423 | RTListForEachSafe(&pInodeInfo->HandleList, pCur, pNext, struct vbsf_handle, Entry) {
|
---|
[77549] | 424 | AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
|
---|
| 425 | == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
|
---|
| 426 | pCur->fFlags |= VBSF_HANDLE_F_ON_LIST;
|
---|
[77526] | 427 | RTListNodeRemove(&pCur->Entry);
|
---|
| 428 | }
|
---|
[77458] | 429 |
|
---|
[77526] | 430 | spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
|
---|
[77458] | 431 | }
|
---|
| 432 |
|
---|
| 433 |
|
---|
| 434 | /**
|
---|
| 435 | * Locates a handle that matches all the flags in @a fFlags.
|
---|
| 436 | *
|
---|
[77529] | 437 | * @returns Pointer to handle on success (retained), use vbsf_handle_release() to
|
---|
[77458] | 438 | * release it. NULL if no suitable handle was found.
|
---|
| 439 | * @param pInodeInfo The inode info to search.
|
---|
| 440 | * @param fFlagsSet The flags that must be set.
|
---|
| 441 | * @param fFlagsClear The flags that must be clear.
|
---|
| 442 | */
|
---|
[77536] | 443 | struct vbsf_handle *vbsf_handle_find(struct vbsf_inode_info *pInodeInfo, uint32_t fFlagsSet, uint32_t fFlagsClear)
|
---|
[77458] | 444 | {
|
---|
[77536] | 445 | struct vbsf_handle *pCur;
|
---|
[77526] | 446 | unsigned long fSavedFlags;
|
---|
| 447 | spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
|
---|
[77458] | 448 |
|
---|
[77536] | 449 | RTListForEach(&pInodeInfo->HandleList, pCur, struct vbsf_handle, Entry) {
|
---|
[77549] | 450 | AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
|
---|
| 451 | == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
|
---|
[77526] | 452 | if ((pCur->fFlags & (fFlagsSet | fFlagsClear)) == fFlagsSet) {
|
---|
| 453 | uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
|
---|
| 454 | if (cRefs > 1) {
|
---|
| 455 | spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
|
---|
[77529] | 456 | SFLOGFLOW(("vbsf_handle_find: returns %p\n", pCur));
|
---|
[77526] | 457 | return pCur;
|
---|
| 458 | }
|
---|
| 459 | /* Oops, already being closed (safe as it's only ever increased here). */
|
---|
| 460 | ASMAtomicDecU32(&pCur->cRefs);
|
---|
| 461 | }
|
---|
| 462 | }
|
---|
[77458] | 463 |
|
---|
[77526] | 464 | spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
|
---|
[77529] | 465 | SFLOGFLOW(("vbsf_handle_find: returns NULL!\n"));
|
---|
[77526] | 466 | return NULL;
|
---|
[77458] | 467 | }
|
---|
| 468 |
|
---|
| 469 |
|
---|
| 470 | /**
|
---|
[77529] | 471 | * Slow worker for vbsf_handle_release() that does the freeing.
|
---|
[77458] | 472 | *
|
---|
| 473 | * @returns 0 (ref count).
|
---|
[77951] | 474 | * @param pHandle The handle to release.
|
---|
| 475 | * @param pSuperInfo The info structure for the shared folder associated with
|
---|
| 476 | * the handle.
|
---|
| 477 | * @param pszCaller The caller name (for logging failures).
|
---|
[77458] | 478 | */
|
---|
[77951] | 479 | uint32_t vbsf_handle_release_slow(struct vbsf_handle *pHandle, struct vbsf_super_info *pSuperInfo, const char *pszCaller)
|
---|
[77458] | 480 | {
|
---|
[77526] | 481 | int rc;
|
---|
| 482 | unsigned long fSavedFlags;
|
---|
[77458] | 483 |
|
---|
[77529] | 484 | SFLOGFLOW(("vbsf_handle_release_slow: %p (%s)\n", pHandle, pszCaller));
|
---|
[77458] | 485 |
|
---|
[77526] | 486 | /*
|
---|
| 487 | * Remove from the list.
|
---|
| 488 | */
|
---|
| 489 | spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
|
---|
[77458] | 490 |
|
---|
[77536] | 491 | AssertMsg((pHandle->fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC, ("%p %#x\n", pHandle, pHandle->fFlags));
|
---|
[77526] | 492 | Assert(pHandle->pInodeInfo);
|
---|
| 493 | Assert(pHandle->pInodeInfo && pHandle->pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
|
---|
[77458] | 494 |
|
---|
[77549] | 495 | if (pHandle->fFlags & VBSF_HANDLE_F_ON_LIST) {
|
---|
| 496 | pHandle->fFlags &= ~VBSF_HANDLE_F_ON_LIST;
|
---|
[77526] | 497 | RTListNodeRemove(&pHandle->Entry);
|
---|
| 498 | }
|
---|
[77458] | 499 |
|
---|
[77526] | 500 | spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
|
---|
[77458] | 501 |
|
---|
[77526] | 502 | /*
|
---|
| 503 | * Actually destroy it.
|
---|
| 504 | */
|
---|
[77951] | 505 | rc = VbglR0SfHostReqCloseSimple(pSuperInfo->map.root, pHandle->hHost);
|
---|
[77526] | 506 | if (RT_FAILURE(rc))
|
---|
| 507 | LogFunc(("Caller %s: VbglR0SfHostReqCloseSimple %#RX64 failed with rc=%Rrc\n", pszCaller, pHandle->hHost, rc));
|
---|
| 508 | pHandle->hHost = SHFL_HANDLE_NIL;
|
---|
[77536] | 509 | pHandle->fFlags = VBSF_HANDLE_F_MAGIC_DEAD;
|
---|
[77526] | 510 | kfree(pHandle);
|
---|
| 511 | return 0;
|
---|
[77458] | 512 | }
|
---|
| 513 |
|
---|
| 514 |
|
---|
| 515 | /**
|
---|
| 516 | * Appends a handle to a handle list.
|
---|
| 517 | *
|
---|
| 518 | * @param pInodeInfo The inode to add it to.
|
---|
| 519 | * @param pHandle The handle to add.
|
---|
| 520 | */
|
---|
[77536] | 521 | void vbsf_handle_append(struct vbsf_inode_info *pInodeInfo, struct vbsf_handle *pHandle)
|
---|
[77458] | 522 | {
|
---|
| 523 | #ifdef VBOX_STRICT
|
---|
[77536] | 524 | struct vbsf_handle *pCur;
|
---|
[77458] | 525 | #endif
|
---|
[77526] | 526 | unsigned long fSavedFlags;
|
---|
[77458] | 527 |
|
---|
[77529] | 528 | SFLOGFLOW(("vbsf_handle_append: %p (to %p)\n", pHandle, pInodeInfo));
|
---|
[77549] | 529 | AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
|
---|
| 530 | ("%p %#x\n", pHandle, pHandle->fFlags));
|
---|
[77526] | 531 | Assert(pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
|
---|
[77458] | 532 |
|
---|
[77526] | 533 | spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
|
---|
[77458] | 534 |
|
---|
[77549] | 535 | AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
|
---|
[77526] | 536 | ("%p %#x\n", pHandle, pHandle->fFlags));
|
---|
[77458] | 537 | #ifdef VBOX_STRICT
|
---|
[77536] | 538 | RTListForEach(&pInodeInfo->HandleList, pCur, struct vbsf_handle, Entry) {
|
---|
[77526] | 539 | Assert(pCur != pHandle);
|
---|
[77549] | 540 | AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
|
---|
| 541 | == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
|
---|
[77526] | 542 | }
|
---|
| 543 | pHandle->pInodeInfo = pInodeInfo;
|
---|
[77458] | 544 | #endif
|
---|
| 545 |
|
---|
[77549] | 546 | pHandle->fFlags |= VBSF_HANDLE_F_ON_LIST;
|
---|
[77526] | 547 | RTListAppend(&pInodeInfo->HandleList, &pHandle->Entry);
|
---|
[77458] | 548 |
|
---|
[77526] | 549 | spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
|
---|
[77458] | 550 | }
|
---|
| 551 |
|
---|
| 552 |
|
---|
[77953] | 553 |
|
---|
[77939] | 554 | /*********************************************************************************************************************************
|
---|
| 555 | * Misc *
|
---|
| 556 | *********************************************************************************************************************************/
|
---|
[77873] | 557 |
|
---|
[85698] | 558 | #if RTLNX_VER_MAX(2,6,6)
|
---|
[77966] | 559 | /** Any writable mappings? */
|
---|
| 560 | DECLINLINE(bool) mapping_writably_mapped(struct address_space const *mapping)
|
---|
| 561 | {
|
---|
[85698] | 562 | # if RTLNX_VER_MIN(2,5,6)
|
---|
[77967] | 563 | return !list_empty(&mapping->i_mmap_shared);
|
---|
[77966] | 564 | # else
|
---|
| 565 | return mapping->i_mmap_shared != NULL;
|
---|
| 566 | # endif
|
---|
| 567 | }
|
---|
| 568 | #endif
|
---|
| 569 |
|
---|
| 570 |
|
---|
[85698] | 571 | #if RTLNX_VER_MAX(2,5,12)
|
---|
[77966] | 572 | /** Missing in 2.4.x, so just stub it for now. */
|
---|
| 573 | DECLINLINE(bool) PageWriteback(struct page const *page)
|
---|
| 574 | {
|
---|
| 575 | return false;
|
---|
| 576 | }
|
---|
| 577 | #endif
|
---|
| 578 |
|
---|
| 579 |
|
---|
[77939] | 580 | /**
|
---|
| 581 | * Helper for deciding wheter we should do a read via the page cache or not.
|
---|
| 582 | *
|
---|
| 583 | * By default we will only use the page cache if there is a writable memory
|
---|
| 584 | * mapping of the file with a chance that it may have modified any of the pages
|
---|
| 585 | * already.
|
---|
| 586 | */
|
---|
[77951] | 587 | DECLINLINE(bool) vbsf_should_use_cached_read(struct file *file, struct address_space *mapping, struct vbsf_super_info *pSuperInfo)
|
---|
[77939] | 588 | {
|
---|
[77953] | 589 | if ( (file->f_flags & O_DIRECT)
|
---|
| 590 | || pSuperInfo->enmCacheMode == kVbsfCacheMode_None)
|
---|
| 591 | return false;
|
---|
| 592 | if ( pSuperInfo->enmCacheMode == kVbsfCacheMode_Read
|
---|
| 593 | || pSuperInfo->enmCacheMode == kVbsfCacheMode_ReadWrite)
|
---|
| 594 | return true;
|
---|
| 595 | Assert(pSuperInfo->enmCacheMode == kVbsfCacheMode_Strict);
|
---|
[77939] | 596 | return mapping
|
---|
| 597 | && mapping->nrpages > 0
|
---|
[77953] | 598 | && mapping_writably_mapped(mapping);
|
---|
[77939] | 599 | }
|
---|
| 600 |
|
---|
| 601 |
|
---|
| 602 |
|
---|
[77873] | 603 | /*********************************************************************************************************************************
|
---|
[77953] | 604 | * Pipe / splice stuff mainly for 2.6.17 >= linux < 2.6.31 (where no fallbacks were available) *
|
---|
[77873] | 605 | *********************************************************************************************************************************/
|
---|
| 606 |
|
---|
[85698] | 607 | #if RTLNX_VER_RANGE(2,6,17, 3,16,0)
|
---|
[77943] | 608 |
|
---|
[85698] | 609 | # if RTLNX_VER_MAX(2,6,30)
|
---|
[77943] | 610 | # define LOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_lock(&(a_pPipe)->inode->i_mutex); } while (0)
|
---|
| 611 | # define UNLOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_unlock(&(a_pPipe)->inode->i_mutex); } while (0)
|
---|
| 612 | # else
|
---|
| 613 | # define LOCK_PIPE(a_pPipe) pipe_lock(a_pPipe)
|
---|
| 614 | # define UNLOCK_PIPE(a_pPipe) pipe_unlock(a_pPipe)
|
---|
| 615 | # endif
|
---|
| 616 |
|
---|
| 617 |
|
---|
| 618 | /** Waits for the pipe buffer status to change. */
|
---|
| 619 | static void vbsf_wait_pipe(struct pipe_inode_info *pPipe)
|
---|
| 620 | {
|
---|
| 621 | DEFINE_WAIT(WaitStuff);
|
---|
| 622 | # ifdef TASK_NONINTERACTIVE
|
---|
| 623 | prepare_to_wait(&pPipe->wait, &WaitStuff, TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
|
---|
| 624 | # else
|
---|
| 625 | prepare_to_wait(&pPipe->wait, &WaitStuff, TASK_INTERRUPTIBLE);
|
---|
| 626 | # endif
|
---|
| 627 | UNLOCK_PIPE(pPipe);
|
---|
| 628 |
|
---|
| 629 | schedule();
|
---|
| 630 |
|
---|
| 631 | finish_wait(&pPipe->wait, &WaitStuff);
|
---|
| 632 | LOCK_PIPE(pPipe);
|
---|
| 633 | }
|
---|
| 634 |
|
---|
[77953] | 635 |
|
---|
[77943] | 636 | /** Worker for vbsf_feed_pages_to_pipe that wakes up readers. */
|
---|
| 637 | static void vbsf_wake_up_pipe(struct pipe_inode_info *pPipe, bool fReaders)
|
---|
| 638 | {
|
---|
| 639 | smp_mb();
|
---|
| 640 | if (waitqueue_active(&pPipe->wait))
|
---|
| 641 | wake_up_interruptible_sync(&pPipe->wait);
|
---|
| 642 | if (fReaders)
|
---|
| 643 | kill_fasync(&pPipe->fasync_readers, SIGIO, POLL_IN);
|
---|
| 644 | else
|
---|
| 645 | kill_fasync(&pPipe->fasync_writers, SIGIO, POLL_OUT);
|
---|
| 646 | }
|
---|
| 647 |
|
---|
| 648 | #endif
|
---|
[85698] | 649 | #if RTLNX_VER_RANGE(2,6,17, 2,6,31)
|
---|
[70460] | 650 |
|
---|
[77939] | 651 | /** Verify pipe buffer content (needed for page-cache to ensure idle page). */
|
---|
[77940] | 652 | static int vbsf_pipe_buf_confirm(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
|
---|
[70460] | 653 | {
|
---|
[77939] | 654 | /*SFLOG3(("vbsf_pipe_buf_confirm: %p\n", pPipeBuf));*/
|
---|
[77526] | 655 | return 0;
|
---|
[70460] | 656 | }
|
---|
| 657 |
|
---|
[77943] | 658 |
|
---|
[77939] | 659 | /** Maps the buffer page. */
|
---|
[77940] | 660 | static void *vbsf_pipe_buf_map(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, int atomic)
|
---|
[70460] | 661 | {
|
---|
[77939] | 662 | void *pvRet;
|
---|
| 663 | if (!atomic)
|
---|
| 664 | pvRet = kmap(pPipeBuf->page);
|
---|
| 665 | else {
|
---|
| 666 | pPipeBuf->flags |= PIPE_BUF_FLAG_ATOMIC;
|
---|
| 667 | pvRet = kmap_atomic(pPipeBuf->page, KM_USER0);
|
---|
| 668 | }
|
---|
| 669 | /*SFLOG3(("vbsf_pipe_buf_map: %p -> %p\n", pPipeBuf, pvRet));*/
|
---|
| 670 | return pvRet;
|
---|
[70460] | 671 | }
|
---|
| 672 |
|
---|
[77943] | 673 |
|
---|
[77939] | 674 | /** Unmaps the buffer page. */
|
---|
[77940] | 675 | static void vbsf_pipe_buf_unmap(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, void *pvMapping)
|
---|
[70460] | 676 | {
|
---|
[77939] | 677 | /*SFLOG3(("vbsf_pipe_buf_unmap: %p/%p\n", pPipeBuf, pvMapping)); */
|
---|
| 678 | if (!(pPipeBuf->flags & PIPE_BUF_FLAG_ATOMIC))
|
---|
| 679 | kunmap(pPipeBuf->page);
|
---|
| 680 | else {
|
---|
| 681 | pPipeBuf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
|
---|
| 682 | kunmap_atomic(pvMapping, KM_USER0);
|
---|
| 683 | }
|
---|
[70460] | 684 | }
|
---|
| 685 |
|
---|
[77943] | 686 |
|
---|
[77939] | 687 | /** Gets a reference to the page. */
|
---|
[77940] | 688 | static void vbsf_pipe_buf_get(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
|
---|
[70786] | 689 | {
|
---|
[77939] | 690 | page_cache_get(pPipeBuf->page);
|
---|
| 691 | /*SFLOG3(("vbsf_pipe_buf_get: %p (return count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
|
---|
[70460] | 692 | }
|
---|
| 693 |
|
---|
[77943] | 694 |
|
---|
[77939] | 695 | /** Release the buffer page (counter to vbsf_pipe_buf_get). */
|
---|
[77940] | 696 | static void vbsf_pipe_buf_release(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
|
---|
[70460] | 697 | {
|
---|
[77939] | 698 | /*SFLOG3(("vbsf_pipe_buf_release: %p (incoming count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
|
---|
| 699 | page_cache_release(pPipeBuf->page);
|
---|
[70460] | 700 | }
|
---|
| 701 |
|
---|
[77943] | 702 |
|
---|
[77939] | 703 | /** Attempt to steal the page.
|
---|
| 704 | * @returns 0 success, 1 on failure. */
|
---|
[77940] | 705 | static int vbsf_pipe_buf_steal(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
|
---|
[70460] | 706 | {
|
---|
[77939] | 707 | if (page_count(pPipeBuf->page) == 1) {
|
---|
| 708 | lock_page(pPipeBuf->page);
|
---|
| 709 | SFLOG3(("vbsf_pipe_buf_steal: %p -> 0\n", pPipeBuf));
|
---|
| 710 | return 0;
|
---|
| 711 | }
|
---|
| 712 | SFLOG3(("vbsf_pipe_buf_steal: %p -> 1\n", pPipeBuf));
|
---|
| 713 | return 1;
|
---|
[70460] | 714 | }
|
---|
| 715 |
|
---|
[77943] | 716 |
|
---|
[77939] | 717 | /**
|
---|
| 718 | * Pipe buffer operations for used by vbsf_feed_pages_to_pipe.
|
---|
| 719 | */
|
---|
[77529] | 720 | static struct pipe_buf_operations vbsf_pipe_buf_ops = {
|
---|
[77526] | 721 | .can_merge = 0,
|
---|
[85698] | 722 | # if RTLNX_VER_MIN(2,6,23)
|
---|
[77939] | 723 | .confirm = vbsf_pipe_buf_confirm,
|
---|
| 724 | # else
|
---|
| 725 | .pin = vbsf_pipe_buf_confirm,
|
---|
| 726 | # endif
|
---|
| 727 | .map = vbsf_pipe_buf_map,
|
---|
| 728 | .unmap = vbsf_pipe_buf_unmap,
|
---|
| 729 | .get = vbsf_pipe_buf_get,
|
---|
| 730 | .release = vbsf_pipe_buf_release,
|
---|
| 731 | .steal = vbsf_pipe_buf_steal,
|
---|
[70460] | 732 | };
|
---|
| 733 |
|
---|
[77939] | 734 |
|
---|
| 735 | /**
|
---|
| 736 | * Feeds the pages to the pipe.
|
---|
| 737 | *
|
---|
| 738 | * Pages given to the pipe are set to NULL in papPages.
|
---|
| 739 | */
|
---|
| 740 | static ssize_t vbsf_feed_pages_to_pipe(struct pipe_inode_info *pPipe, struct page **papPages, size_t cPages, uint32_t offPg0,
|
---|
| 741 | uint32_t cbActual, unsigned fFlags)
|
---|
[70460] | 742 | {
|
---|
[77939] | 743 | ssize_t cbRet = 0;
|
---|
| 744 | size_t iPage = 0;
|
---|
| 745 | bool fNeedWakeUp = false;
|
---|
[70460] | 746 |
|
---|
[77939] | 747 | LOCK_PIPE(pPipe);
|
---|
| 748 | for (;;) {
|
---|
| 749 | if ( pPipe->readers > 0
|
---|
| 750 | && pPipe->nrbufs < PIPE_BUFFERS) {
|
---|
| 751 | struct pipe_buffer *pPipeBuf = &pPipe->bufs[(pPipe->curbuf + pPipe->nrbufs) % PIPE_BUFFERS];
|
---|
| 752 | uint32_t const cbThisPage = RT_MIN(cbActual, PAGE_SIZE - offPg0);
|
---|
| 753 | pPipeBuf->len = cbThisPage;
|
---|
| 754 | pPipeBuf->offset = offPg0;
|
---|
[85698] | 755 | # if RTLNX_VER_MIN(2,6,23)
|
---|
[77939] | 756 | pPipeBuf->private = 0;
|
---|
| 757 | # endif
|
---|
| 758 | pPipeBuf->ops = &vbsf_pipe_buf_ops;
|
---|
| 759 | pPipeBuf->flags = fFlags & SPLICE_F_GIFT ? PIPE_BUF_FLAG_GIFT : 0;
|
---|
| 760 | pPipeBuf->page = papPages[iPage];
|
---|
[70460] | 761 |
|
---|
[77939] | 762 | papPages[iPage++] = NULL;
|
---|
| 763 | pPipe->nrbufs++;
|
---|
| 764 | fNeedWakeUp |= pPipe->inode != NULL;
|
---|
| 765 | offPg0 = 0;
|
---|
| 766 | cbRet += cbThisPage;
|
---|
[70460] | 767 |
|
---|
[77939] | 768 | /* done? */
|
---|
| 769 | cbActual -= cbThisPage;
|
---|
| 770 | if (!cbActual)
|
---|
[77526] | 771 | break;
|
---|
[77939] | 772 | } else if (pPipe->readers == 0) {
|
---|
| 773 | SFLOGFLOW(("vbsf_feed_pages_to_pipe: no readers!\n"));
|
---|
[77526] | 774 | send_sig(SIGPIPE, current, 0);
|
---|
[77939] | 775 | if (cbRet == 0)
|
---|
| 776 | cbRet = -EPIPE;
|
---|
| 777 | break;
|
---|
| 778 | } else if (fFlags & SPLICE_F_NONBLOCK) {
|
---|
| 779 | if (cbRet == 0)
|
---|
| 780 | cbRet = -EAGAIN;
|
---|
| 781 | break;
|
---|
| 782 | } else if (signal_pending(current)) {
|
---|
| 783 | if (cbRet == 0)
|
---|
| 784 | cbRet = -ERESTARTSYS;
|
---|
[77942] | 785 | SFLOGFLOW(("vbsf_feed_pages_to_pipe: pending signal! (%zd)\n", cbRet));
|
---|
[77939] | 786 | break;
|
---|
| 787 | } else {
|
---|
| 788 | if (fNeedWakeUp) {
|
---|
| 789 | vbsf_wake_up_pipe(pPipe, true /*fReaders*/);
|
---|
| 790 | fNeedWakeUp = 0;
|
---|
| 791 | }
|
---|
| 792 | pPipe->waiting_writers++;
|
---|
| 793 | vbsf_wait_pipe(pPipe);
|
---|
| 794 | pPipe->waiting_writers--;
|
---|
[77526] | 795 | }
|
---|
[77939] | 796 | }
|
---|
| 797 | UNLOCK_PIPE(pPipe);
|
---|
[70460] | 798 |
|
---|
[77939] | 799 | if (fNeedWakeUp)
|
---|
| 800 | vbsf_wake_up_pipe(pPipe, true /*fReaders*/);
|
---|
| 801 |
|
---|
| 802 | return cbRet;
|
---|
| 803 | }
|
---|
| 804 |
|
---|
| 805 |
|
---|
| 806 | /**
|
---|
| 807 | * For splicing from a file to a pipe.
|
---|
| 808 | */
|
---|
[77942] | 809 | static ssize_t vbsf_splice_read(struct file *file, loff_t *poffset, struct pipe_inode_info *pipe, size_t len, unsigned int flags)
|
---|
[77939] | 810 | {
|
---|
[77951] | 811 | struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
|
---|
| 812 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
[77939] | 813 | ssize_t cbRet;
|
---|
| 814 |
|
---|
[77942] | 815 | SFLOGFLOW(("vbsf_splice_read: file=%p poffset=%p{%#RX64} pipe=%p len=%#zx flags=%#x\n", file, poffset, *poffset, pipe, len, flags));
|
---|
[77951] | 816 | if (vbsf_should_use_cached_read(file, inode->i_mapping, pSuperInfo)) {
|
---|
[77942] | 817 | cbRet = generic_file_splice_read(file, poffset, pipe, len, flags);
|
---|
[77939] | 818 | } else {
|
---|
| 819 | /*
|
---|
| 820 | * Create a read request.
|
---|
| 821 | */
|
---|
| 822 | loff_t offFile = *poffset;
|
---|
| 823 | size_t cPages = RT_MIN(RT_ALIGN_Z((offFile & ~PAGE_CACHE_MASK) + len, PAGE_CACHE_SIZE) >> PAGE_CACHE_SHIFT,
|
---|
| 824 | PIPE_BUFFERS);
|
---|
| 825 | VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
|
---|
| 826 | PgLst.aPages[cPages]));
|
---|
| 827 | if (pReq) {
|
---|
| 828 | /*
|
---|
| 829 | * Allocate pages.
|
---|
| 830 | */
|
---|
| 831 | struct page *apPages[PIPE_BUFFERS];
|
---|
| 832 | size_t i;
|
---|
| 833 | pReq->PgLst.offFirstPage = (uint16_t)offFile & (uint16_t)PAGE_OFFSET_MASK;
|
---|
| 834 | cbRet = 0;
|
---|
| 835 | for (i = 0; i < cPages; i++) {
|
---|
| 836 | struct page *pPage;
|
---|
| 837 | apPages[i] = pPage = alloc_page(GFP_USER);
|
---|
| 838 | if (pPage) {
|
---|
| 839 | pReq->PgLst.aPages[i] = page_to_phys(pPage);
|
---|
| 840 | # ifdef VBOX_STRICT
|
---|
| 841 | ASMMemFill32(kmap(pPage), PAGE_SIZE, UINT32_C(0xdeadbeef));
|
---|
| 842 | kunmap(pPage);
|
---|
| 843 | # endif
|
---|
| 844 | } else {
|
---|
| 845 | cbRet = -ENOMEM;
|
---|
| 846 | break;
|
---|
| 847 | }
|
---|
[77526] | 848 | }
|
---|
[77939] | 849 | if (cbRet == 0) {
|
---|
| 850 | /*
|
---|
| 851 | * Do the reading.
|
---|
| 852 | */
|
---|
| 853 | uint32_t const cbToRead = RT_MIN((cPages << PAGE_SHIFT) - (offFile & PAGE_OFFSET_MASK), len);
|
---|
[77942] | 854 | struct vbsf_reg_info *sf_r = (struct vbsf_reg_info *)file->private_data;
|
---|
[77951] | 855 | int vrc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile, cbToRead, cPages);
|
---|
[77939] | 856 | if (RT_SUCCESS(vrc)) {
|
---|
| 857 | /*
|
---|
| 858 | * Get the number of bytes read, jettison the request
|
---|
| 859 | * and, in case of EOF, any unnecessary pages.
|
---|
| 860 | */
|
---|
| 861 | uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
|
---|
| 862 | AssertStmt(cbActual <= cbToRead, cbActual = cbToRead);
|
---|
| 863 | SFLOG2(("vbsf_splice_read: read -> %#x bytes @ %#RX64\n", cbActual, offFile));
|
---|
| 864 |
|
---|
| 865 | VbglR0PhysHeapFree(pReq);
|
---|
| 866 | pReq = NULL;
|
---|
| 867 |
|
---|
| 868 | /*
|
---|
| 869 | * Now, feed it to the pipe thingy.
|
---|
| 870 | * This will take ownership of the all pages no matter what happens.
|
---|
| 871 | */
|
---|
| 872 | cbRet = vbsf_feed_pages_to_pipe(pipe, apPages, cPages, offFile & PAGE_OFFSET_MASK, cbActual, flags);
|
---|
| 873 | if (cbRet > 0)
|
---|
| 874 | *poffset = offFile + cbRet;
|
---|
| 875 | } else {
|
---|
| 876 | cbRet = -RTErrConvertToErrno(vrc);
|
---|
| 877 | SFLOGFLOW(("vbsf_splice_read: Read failed: %Rrc -> %zd\n", vrc, cbRet));
|
---|
| 878 | }
|
---|
| 879 | i = cPages;
|
---|
| 880 | }
|
---|
| 881 |
|
---|
| 882 | while (i-- > 0)
|
---|
| 883 | if (apPages[i])
|
---|
| 884 | __free_pages(apPages[i], 0);
|
---|
| 885 | if (pReq)
|
---|
| 886 | VbglR0PhysHeapFree(pReq);
|
---|
| 887 | } else {
|
---|
| 888 | cbRet = -ENOMEM;
|
---|
[77526] | 889 | }
|
---|
| 890 | }
|
---|
[77939] | 891 | SFLOGFLOW(("vbsf_splice_read: returns %zd (%#zx), *poffset=%#RX64\n", cbRet, cbRet, *poffset));
|
---|
| 892 | return cbRet;
|
---|
[70460] | 893 | }
|
---|
| 894 |
|
---|
[77943] | 895 | #endif /* 2.6.17 <= LINUX_VERSION_CODE < 2.6.31 */
|
---|
[85698] | 896 | #if RTLNX_VER_RANGE(2,6,17, 3,16,0)
|
---|
[77942] | 897 |
|
---|
| 898 | /**
|
---|
| 899 | * For splicing from a pipe to a file.
|
---|
[77943] | 900 | *
|
---|
| 901 | * Since we can combine buffers and request allocations, this should be faster
|
---|
| 902 | * than the default implementation.
|
---|
[77942] | 903 | */
|
---|
| 904 | static ssize_t vbsf_splice_write(struct pipe_inode_info *pPipe, struct file *file, loff_t *poffset, size_t len, unsigned int flags)
|
---|
| 905 | {
|
---|
[77951] | 906 | struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
|
---|
| 907 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
[77942] | 908 | ssize_t cbRet;
|
---|
| 909 |
|
---|
| 910 | SFLOGFLOW(("vbsf_splice_write: pPipe=%p file=%p poffset=%p{%#RX64} len=%#zx flags=%#x\n", pPipe, file, poffset, *poffset, len, flags));
|
---|
[77943] | 911 | /** @todo later if (false) {
|
---|
[77942] | 912 | cbRet = generic_file_splice_write(pPipe, file, poffset, len, flags);
|
---|
[77943] | 913 | } else */ {
|
---|
[77942] | 914 | /*
|
---|
| 915 | * Prepare a write request.
|
---|
| 916 | */
|
---|
[77943] | 917 | # ifdef PIPE_BUFFERS
|
---|
| 918 | uint32_t const cMaxPages = RT_MIN(PIPE_BUFFERS, RT_ALIGN_Z(len, PAGE_SIZE) >> PAGE_SHIFT);
|
---|
| 919 | # else
|
---|
| 920 | uint32_t const cMaxPages = RT_MIN(RT_MAX(RT_MIN(pPipe->buffers, 256), PIPE_DEF_BUFFERS),
|
---|
| 921 | RT_ALIGN_Z(len, PAGE_SIZE) >> PAGE_SHIFT);
|
---|
| 922 | # endif
|
---|
| 923 | VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
|
---|
| 924 | PgLst.aPages[cMaxPages]));
|
---|
[77942] | 925 | if (pReq) {
|
---|
| 926 | /*
|
---|
| 927 | * Feed from the pipe.
|
---|
| 928 | */
|
---|
[77943] | 929 | struct vbsf_reg_info *sf_r = (struct vbsf_reg_info *)file->private_data;
|
---|
| 930 | struct address_space *mapping = inode->i_mapping;
|
---|
| 931 | loff_t offFile = *poffset;
|
---|
| 932 | bool fNeedWakeUp = false;
|
---|
[77942] | 933 | cbRet = 0;
|
---|
| 934 |
|
---|
| 935 | LOCK_PIPE(pPipe);
|
---|
| 936 |
|
---|
| 937 | for (;;) {
|
---|
| 938 | unsigned cBufs = pPipe->nrbufs;
|
---|
| 939 | /*SFLOG2(("vbsf_splice_write: nrbufs=%#x curbuf=%#x\n", cBufs, pPipe->curbuf));*/
|
---|
| 940 | if (cBufs) {
|
---|
| 941 | /*
|
---|
| 942 | * There is data available. Write it to the file.
|
---|
| 943 | */
|
---|
| 944 | int vrc;
|
---|
| 945 | struct pipe_buffer *pPipeBuf = &pPipe->bufs[pPipe->curbuf];
|
---|
| 946 | uint32_t cPagesToWrite = 1;
|
---|
| 947 | uint32_t cbToWrite = pPipeBuf->len;
|
---|
| 948 |
|
---|
| 949 | Assert(pPipeBuf->offset < PAGE_SIZE);
|
---|
| 950 | Assert(pPipeBuf->offset + pPipeBuf->len <= PAGE_SIZE);
|
---|
| 951 |
|
---|
| 952 | pReq->PgLst.offFirstPage = pPipeBuf->offset & PAGE_OFFSET;
|
---|
| 953 | pReq->PgLst.aPages[0] = page_to_phys(pPipeBuf->page);
|
---|
| 954 |
|
---|
| 955 | /* Add any adjacent page buffers: */
|
---|
| 956 | while ( cPagesToWrite < cBufs
|
---|
| 957 | && cPagesToWrite < cMaxPages
|
---|
| 958 | && ((pReq->PgLst.offFirstPage + cbToWrite) & PAGE_OFFSET_MASK) == 0) {
|
---|
[77943] | 959 | # ifdef PIPE_BUFFERS
|
---|
[77942] | 960 | struct pipe_buffer *pPipeBuf2 = &pPipe->bufs[(pPipe->curbuf + cPagesToWrite) % PIPE_BUFFERS];
|
---|
[77943] | 961 | # else
|
---|
| 962 | struct pipe_buffer *pPipeBuf2 = &pPipe->bufs[(pPipe->curbuf + cPagesToWrite) % pPipe->buffers];
|
---|
| 963 | # endif
|
---|
[77942] | 964 | Assert(pPipeBuf2->len <= PAGE_SIZE);
|
---|
| 965 | Assert(pPipeBuf2->offset < PAGE_SIZE);
|
---|
| 966 | if (pPipeBuf2->offset != 0)
|
---|
| 967 | break;
|
---|
| 968 | pReq->PgLst.aPages[cPagesToWrite] = page_to_phys(pPipeBuf2->page);
|
---|
| 969 | cbToWrite += pPipeBuf2->len;
|
---|
| 970 | cPagesToWrite += 1;
|
---|
| 971 | }
|
---|
| 972 |
|
---|
[77943] | 973 | /* Check that we don't have signals pending before we issue the write, as
|
---|
| 974 | we'll only end up having to cancel the HGCM request 99% of the time: */
|
---|
[77959] | 975 | if (!signal_pending(current)) {
|
---|
[77961] | 976 | struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
|
---|
[77951] | 977 | vrc = VbglR0SfHostReqWritePgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile,
|
---|
| 978 | cbToWrite, cPagesToWrite);
|
---|
[77959] | 979 | sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
|
---|
| 980 | } else
|
---|
[77943] | 981 | vrc = VERR_INTERRUPTED;
|
---|
[77942] | 982 | if (RT_SUCCESS(vrc)) {
|
---|
| 983 | /*
|
---|
| 984 | * Get the number of bytes actually written, update file position
|
---|
| 985 | * and return value, and advance the pipe buffer.
|
---|
| 986 | */
|
---|
| 987 | uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
|
---|
| 988 | AssertStmt(cbActual <= cbToWrite, cbActual = cbToWrite);
|
---|
| 989 | SFLOG2(("vbsf_splice_write: write -> %#x bytes @ %#RX64\n", cbActual, offFile));
|
---|
| 990 |
|
---|
[77943] | 991 | cbRet += cbActual;
|
---|
[77942] | 992 |
|
---|
| 993 | while (cbActual > 0) {
|
---|
| 994 | uint32_t cbAdvance = RT_MIN(pPipeBuf->len, cbActual);
|
---|
[77943] | 995 |
|
---|
| 996 | vbsf_reg_write_sync_page_cache(mapping, offFile, cbAdvance, NULL,
|
---|
| 997 | &pPipeBuf->page, pPipeBuf->offset, 1);
|
---|
| 998 |
|
---|
| 999 | offFile += cbAdvance;
|
---|
[77942] | 1000 | cbActual -= cbAdvance;
|
---|
| 1001 | pPipeBuf->offset += cbAdvance;
|
---|
| 1002 | pPipeBuf->len -= cbAdvance;
|
---|
[77943] | 1003 |
|
---|
[77942] | 1004 | if (!pPipeBuf->len) {
|
---|
[77943] | 1005 | struct pipe_buf_operations const *pOps = pPipeBuf->ops;
|
---|
[77942] | 1006 | pPipeBuf->ops = NULL;
|
---|
| 1007 | pOps->release(pPipe, pPipeBuf);
|
---|
| 1008 |
|
---|
[77943] | 1009 | # ifdef PIPE_BUFFERS
|
---|
[77942] | 1010 | pPipe->curbuf = (pPipe->curbuf + 1) % PIPE_BUFFERS;
|
---|
[77943] | 1011 | # else
|
---|
| 1012 | pPipe->curbuf = (pPipe->curbuf + 1) % pPipe->buffers;
|
---|
| 1013 | # endif
|
---|
[77942] | 1014 | pPipe->nrbufs -= 1;
|
---|
| 1015 | pPipeBuf = &pPipe->bufs[pPipe->curbuf];
|
---|
| 1016 |
|
---|
[85698] | 1017 | # if RTLNX_VER_MAX(2,6,30)
|
---|
[77942] | 1018 | fNeedWakeUp |= pPipe->inode != NULL;
|
---|
[77943] | 1019 | # else
|
---|
| 1020 | fNeedWakeUp = true;
|
---|
| 1021 | # endif
|
---|
[77942] | 1022 | } else {
|
---|
| 1023 | Assert(cbActual == 0);
|
---|
| 1024 | break;
|
---|
| 1025 | }
|
---|
| 1026 | }
|
---|
[77943] | 1027 |
|
---|
| 1028 | *poffset = offFile;
|
---|
[77942] | 1029 | } else {
|
---|
| 1030 | if (cbRet == 0)
|
---|
[77943] | 1031 | cbRet = vrc == VERR_INTERRUPTED ? -ERESTARTSYS : -RTErrConvertToErrno(vrc);
|
---|
[77942] | 1032 | SFLOGFLOW(("vbsf_splice_write: Write failed: %Rrc -> %zd (cbRet=%#zx)\n",
|
---|
| 1033 | vrc, -RTErrConvertToErrno(vrc), cbRet));
|
---|
| 1034 | break;
|
---|
| 1035 | }
|
---|
| 1036 | } else {
|
---|
| 1037 | /*
|
---|
| 1038 | * Wait for data to become available, if there is chance that'll happen.
|
---|
| 1039 | */
|
---|
| 1040 | /* Quit if there are no writers (think EOF): */
|
---|
| 1041 | if (pPipe->writers == 0) {
|
---|
| 1042 | SFLOGFLOW(("vbsf_splice_write: No buffers. No writers. The show is done!\n"));
|
---|
| 1043 | break;
|
---|
| 1044 | }
|
---|
| 1045 |
|
---|
| 1046 | /* Quit if if we've written some and no writers waiting on the lock: */
|
---|
| 1047 | if (cbRet > 0 && pPipe->waiting_writers == 0) {
|
---|
| 1048 | SFLOGFLOW(("vbsf_splice_write: No waiting writers, returning what we've got.\n"));
|
---|
| 1049 | break;
|
---|
| 1050 | }
|
---|
| 1051 |
|
---|
| 1052 | /* Quit with EAGAIN if non-blocking: */
|
---|
| 1053 | if (flags & SPLICE_F_NONBLOCK) {
|
---|
| 1054 | if (cbRet == 0)
|
---|
| 1055 | cbRet = -EAGAIN;
|
---|
| 1056 | break;
|
---|
| 1057 | }
|
---|
| 1058 |
|
---|
| 1059 | /* Quit if we've got pending signals: */
|
---|
| 1060 | if (signal_pending(current)) {
|
---|
| 1061 | if (cbRet == 0)
|
---|
| 1062 | cbRet = -ERESTARTSYS;
|
---|
| 1063 | SFLOGFLOW(("vbsf_splice_write: pending signal! (%zd)\n", cbRet));
|
---|
| 1064 | break;
|
---|
| 1065 | }
|
---|
| 1066 |
|
---|
| 1067 | /* Wake up writers before we start waiting: */
|
---|
| 1068 | if (fNeedWakeUp) {
|
---|
| 1069 | vbsf_wake_up_pipe(pPipe, false /*fReaders*/);
|
---|
| 1070 | fNeedWakeUp = false;
|
---|
| 1071 | }
|
---|
| 1072 | vbsf_wait_pipe(pPipe);
|
---|
| 1073 | }
|
---|
| 1074 | } /* feed loop */
|
---|
| 1075 |
|
---|
| 1076 | if (fNeedWakeUp)
|
---|
| 1077 | vbsf_wake_up_pipe(pPipe, false /*fReaders*/);
|
---|
| 1078 |
|
---|
| 1079 | UNLOCK_PIPE(pPipe);
|
---|
| 1080 |
|
---|
| 1081 | VbglR0PhysHeapFree(pReq);
|
---|
| 1082 | } else {
|
---|
| 1083 | cbRet = -ENOMEM;
|
---|
| 1084 | }
|
---|
| 1085 | }
|
---|
| 1086 | SFLOGFLOW(("vbsf_splice_write: returns %zd (%#zx), *poffset=%#RX64\n", cbRet, cbRet, *poffset));
|
---|
| 1087 | return cbRet;
|
---|
| 1088 | }
|
---|
| 1089 |
|
---|
[77943] | 1090 | #endif /* 2.6.17 <= LINUX_VERSION_CODE < 3.16.0 */
|
---|
[70460] | 1091 |
|
---|
[85698] | 1092 | #if RTLNX_VER_RANGE(2,5,30, 2,6,23)
|
---|
[77944] | 1093 | /**
|
---|
| 1094 | * Our own senfile implementation that does not go via the page cache like
|
---|
| 1095 | * generic_file_sendfile() does.
|
---|
| 1096 | */
|
---|
[77946] | 1097 | static ssize_t vbsf_reg_sendfile(struct file *pFile, loff_t *poffFile, size_t cbToSend, read_actor_t pfnActor,
|
---|
[85698] | 1098 | # if RTLNX_VER_MIN(2,6,8)
|
---|
[77946] | 1099 | void *pvUser
|
---|
| 1100 | # else
|
---|
| 1101 | void __user *pvUser
|
---|
| 1102 | # endif
|
---|
| 1103 | )
|
---|
[77944] | 1104 | {
|
---|
[77951] | 1105 | struct inode *inode = VBSF_GET_F_DENTRY(pFile)->d_inode;
|
---|
| 1106 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
[77944] | 1107 | ssize_t cbRet;
|
---|
| 1108 | SFLOGFLOW(("vbsf_reg_sendfile: pFile=%p poffFile=%p{%#RX64} cbToSend=%#zx pfnActor=%p pvUser=%p\n",
|
---|
| 1109 | pFile, poffFile, poffFile ? *poffFile : 0, cbToSend, pfnActor, pvUser));
|
---|
[77951] | 1110 | Assert(pSuperInfo);
|
---|
[77873] | 1111 |
|
---|
[77944] | 1112 | /*
|
---|
| 1113 | * Return immediately if asked to send nothing.
|
---|
| 1114 | */
|
---|
| 1115 | if (cbToSend == 0)
|
---|
| 1116 | return 0;
|
---|
| 1117 |
|
---|
| 1118 | /*
|
---|
| 1119 | * Like for vbsf_reg_read() and vbsf_reg_read_iter(), we allow going via
|
---|
| 1120 | * the page cache in some cases or configs.
|
---|
| 1121 | */
|
---|
[77951] | 1122 | if (vbsf_should_use_cached_read(pFile, inode->i_mapping, pSuperInfo)) {
|
---|
[77944] | 1123 | cbRet = generic_file_sendfile(pFile, poffFile, cbToSend, pfnActor, pvUser);
|
---|
| 1124 | SFLOGFLOW(("vbsf_reg_sendfile: returns %#zx *poffFile=%#RX64 [generic_file_sendfile]\n", cbRet, poffFile ? *poffFile : UINT64_MAX));
|
---|
| 1125 | } else {
|
---|
| 1126 | /*
|
---|
| 1127 | * Allocate a request and a bunch of pages for reading from the file.
|
---|
| 1128 | */
|
---|
| 1129 | struct page *apPages[16];
|
---|
| 1130 | loff_t offFile = poffFile ? *poffFile : 0;
|
---|
| 1131 | size_t const cPages = cbToSend + ((size_t)offFile & PAGE_OFFSET_MASK) >= RT_ELEMENTS(apPages) * PAGE_SIZE
|
---|
| 1132 | ? RT_ELEMENTS(apPages)
|
---|
| 1133 | : RT_ALIGN_Z(cbToSend + ((size_t)offFile & PAGE_OFFSET_MASK), PAGE_SIZE) >> PAGE_SHIFT;
|
---|
| 1134 | size_t iPage;
|
---|
| 1135 | VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
|
---|
| 1136 | PgLst.aPages[cPages]));
|
---|
| 1137 | if (pReq) {
|
---|
| 1138 | Assert(cPages > 0);
|
---|
| 1139 | cbRet = 0;
|
---|
| 1140 | for (iPage = 0; iPage < cPages; iPage++) {
|
---|
| 1141 | struct page *pPage;
|
---|
| 1142 | apPages[iPage] = pPage = alloc_page(GFP_USER);
|
---|
| 1143 | if (pPage) {
|
---|
| 1144 | Assert(page_count(pPage) == 1);
|
---|
| 1145 | pReq->PgLst.aPages[iPage] = page_to_phys(pPage);
|
---|
| 1146 | } else {
|
---|
| 1147 | while (iPage-- > 0)
|
---|
| 1148 | vbsf_put_page(apPages[iPage]);
|
---|
| 1149 | cbRet = -ENOMEM;
|
---|
| 1150 | break;
|
---|
| 1151 | }
|
---|
| 1152 | }
|
---|
| 1153 | if (cbRet == 0) {
|
---|
| 1154 | /*
|
---|
| 1155 | * Do the job.
|
---|
| 1156 | */
|
---|
| 1157 | struct vbsf_reg_info *sf_r = (struct vbsf_reg_info *)pFile->private_data;
|
---|
| 1158 | read_descriptor_t RdDesc;
|
---|
| 1159 | RdDesc.count = cbToSend;
|
---|
[85698] | 1160 | # if RTLNX_VER_MIN(2,6,8)
|
---|
[77944] | 1161 | RdDesc.arg.data = pvUser;
|
---|
[77946] | 1162 | # else
|
---|
| 1163 | RdDesc.buf = pvUser;
|
---|
| 1164 | # endif
|
---|
[77944] | 1165 | RdDesc.written = 0;
|
---|
| 1166 | RdDesc.error = 0;
|
---|
| 1167 |
|
---|
| 1168 | Assert(sf_r);
|
---|
| 1169 | Assert((sf_r->Handle.fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC);
|
---|
| 1170 |
|
---|
| 1171 | while (cbToSend > 0) {
|
---|
| 1172 | /*
|
---|
| 1173 | * Read another chunk. For paranoid reasons, we keep data where the page cache
|
---|
| 1174 | * would keep it, i.e. page offset bits corresponds to the file offset bits.
|
---|
| 1175 | */
|
---|
| 1176 | uint32_t const offPg0 = (uint32_t)offFile & (uint32_t)PAGE_OFFSET_MASK;
|
---|
| 1177 | uint32_t const cbToRead = RT_MIN((cPages << PAGE_SHIFT) - offPg0, cbToSend);
|
---|
| 1178 | uint32_t const cPagesToRead = RT_ALIGN_Z(cbToRead + offPg0, PAGE_SIZE) >> PAGE_SHIFT;
|
---|
| 1179 | int vrc;
|
---|
| 1180 | pReq->PgLst.offFirstPage = (uint16_t)offPg0;
|
---|
| 1181 | if (!signal_pending(current))
|
---|
[77951] | 1182 | vrc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile,
|
---|
| 1183 | cbToRead, cPagesToRead);
|
---|
[77944] | 1184 | else
|
---|
| 1185 | vrc = VERR_INTERRUPTED;
|
---|
| 1186 | if (RT_SUCCESS(vrc)) {
|
---|
| 1187 | /*
|
---|
| 1188 | * Pass what we read to the actor.
|
---|
| 1189 | */
|
---|
| 1190 | uint32_t off = offPg0;
|
---|
| 1191 | uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
|
---|
| 1192 | bool const fIsEof = cbActual < cbToRead;
|
---|
| 1193 | AssertStmt(cbActual <= cbToRead, cbActual = cbToRead);
|
---|
| 1194 | SFLOG3(("vbsf_reg_sendfile: Read %#x bytes (offPg0=%#x), wanted %#x ...\n", cbActual, offPg0, cbToRead));
|
---|
| 1195 |
|
---|
| 1196 | iPage = 0;
|
---|
| 1197 | while (cbActual > 0) {
|
---|
| 1198 | uint32_t const cbPage = RT_MIN(cbActual, PAGE_SIZE - off);
|
---|
| 1199 | int const cbRetActor = pfnActor(&RdDesc, apPages[iPage], off, cbPage);
|
---|
| 1200 | Assert(cbRetActor >= 0); /* Returns zero on failure, with RdDesc.error holding the status code. */
|
---|
| 1201 |
|
---|
| 1202 | AssertMsg(iPage < cPages && iPage < cPagesToRead, ("iPage=%#x cPages=%#x cPagesToRead=%#x\n", iPage, cPages, cPagesToRead));
|
---|
| 1203 |
|
---|
| 1204 | offFile += cbRetActor;
|
---|
| 1205 | if ((uint32_t)cbRetActor == cbPage && RdDesc.count > 0) {
|
---|
| 1206 | cbActual -= cbPage;
|
---|
| 1207 | cbToSend -= cbPage;
|
---|
| 1208 | iPage++;
|
---|
| 1209 | } else {
|
---|
| 1210 | SFLOG3(("vbsf_reg_sendfile: cbRetActor=%#x (%d) cbPage=%#x RdDesc{count=%#lx error=%d} iPage=%#x/%#x/%#x cbToSend=%#zx\n",
|
---|
| 1211 | cbRetActor, cbRetActor, cbPage, RdDesc.count, RdDesc.error, iPage, cPagesToRead, cPages, cbToSend));
|
---|
| 1212 | vrc = VERR_CALLBACK_RETURN;
|
---|
| 1213 | break;
|
---|
| 1214 | }
|
---|
| 1215 | off = 0;
|
---|
| 1216 | }
|
---|
| 1217 |
|
---|
| 1218 | /*
|
---|
| 1219 | * Are we done yet?
|
---|
| 1220 | */
|
---|
| 1221 | if (RT_FAILURE_NP(vrc) || cbToSend == 0 || RdDesc.error != 0 || fIsEof) {
|
---|
| 1222 | break;
|
---|
| 1223 | }
|
---|
| 1224 |
|
---|
| 1225 | /*
|
---|
| 1226 | * Replace pages held by the actor.
|
---|
| 1227 | */
|
---|
| 1228 | vrc = VINF_SUCCESS;
|
---|
| 1229 | for (iPage = 0; iPage < cPages; iPage++) {
|
---|
| 1230 | struct page *pPage = apPages[iPage];
|
---|
| 1231 | if (page_count(pPage) != 1) {
|
---|
| 1232 | struct page *pNewPage = alloc_page(GFP_USER);
|
---|
| 1233 | if (pNewPage) {
|
---|
| 1234 | SFLOGFLOW(("vbsf_reg_sendfile: Replacing page #%x: %p -> %p\n", iPage, pPage, pNewPage));
|
---|
| 1235 | vbsf_put_page(pPage);
|
---|
| 1236 | apPages[iPage] = pNewPage;
|
---|
| 1237 | } else {
|
---|
| 1238 | SFLOGFLOW(("vbsf_reg_sendfile: Failed to allocate a replacement page.\n"));
|
---|
| 1239 | vrc = VERR_NO_MEMORY;
|
---|
| 1240 | break;
|
---|
| 1241 | }
|
---|
| 1242 | }
|
---|
| 1243 | }
|
---|
| 1244 | if (RT_FAILURE(vrc))
|
---|
| 1245 | break; /* RdDesc.written should be non-zero, so don't bother with setting error. */
|
---|
| 1246 | } else {
|
---|
| 1247 | RdDesc.error = vrc == VERR_INTERRUPTED ? -ERESTARTSYS : -RTErrConvertToErrno(vrc);
|
---|
| 1248 | SFLOGFLOW(("vbsf_reg_sendfile: Read failed: %Rrc -> %zd (RdDesc.error=%#d)\n",
|
---|
| 1249 | vrc, -RTErrConvertToErrno(vrc), RdDesc.error));
|
---|
| 1250 | break;
|
---|
| 1251 | }
|
---|
| 1252 | }
|
---|
| 1253 |
|
---|
| 1254 | /*
|
---|
| 1255 | * Free memory.
|
---|
| 1256 | */
|
---|
| 1257 | for (iPage = 0; iPage < cPages; iPage++)
|
---|
| 1258 | vbsf_put_page(apPages[iPage]);
|
---|
| 1259 |
|
---|
| 1260 | /*
|
---|
| 1261 | * Set the return values.
|
---|
| 1262 | */
|
---|
| 1263 | if (RdDesc.written) {
|
---|
| 1264 | cbRet = RdDesc.written;
|
---|
| 1265 | if (poffFile)
|
---|
| 1266 | *poffFile = offFile;
|
---|
| 1267 | } else {
|
---|
| 1268 | cbRet = RdDesc.error;
|
---|
| 1269 | }
|
---|
| 1270 | }
|
---|
| 1271 | VbglR0PhysHeapFree(pReq);
|
---|
| 1272 | } else {
|
---|
| 1273 | cbRet = -ENOMEM;
|
---|
| 1274 | }
|
---|
| 1275 | SFLOGFLOW(("vbsf_reg_sendfile: returns %#zx offFile=%#RX64\n", cbRet, offFile));
|
---|
| 1276 | }
|
---|
| 1277 | return cbRet;
|
---|
| 1278 | }
|
---|
| 1279 | #endif /* 2.5.30 <= LINUX_VERSION_CODE < 2.6.23 */
|
---|
| 1280 |
|
---|
| 1281 |
|
---|
[77873] | 1282 | /*********************************************************************************************************************************
|
---|
| 1283 | * File operations on regular files *
|
---|
| 1284 | *********************************************************************************************************************************/
|
---|
| 1285 |
|
---|
[77549] | 1286 | /** Wrapper around put_page / page_cache_release. */
|
---|
| 1287 | DECLINLINE(void) vbsf_put_page(struct page *pPage)
|
---|
| 1288 | {
|
---|
[85698] | 1289 | #if RTLNX_VER_MIN(4,6,0)
|
---|
[77549] | 1290 | put_page(pPage);
|
---|
| 1291 | #else
|
---|
| 1292 | page_cache_release(pPage);
|
---|
| 1293 | #endif
|
---|
| 1294 | }
|
---|
[77089] | 1295 |
|
---|
[77549] | 1296 |
|
---|
| 1297 | /** Wrapper around get_page / page_cache_get. */
|
---|
| 1298 | DECLINLINE(void) vbsf_get_page(struct page *pPage)
|
---|
| 1299 | {
|
---|
[85698] | 1300 | #if RTLNX_VER_MIN(4,6,0)
|
---|
[77549] | 1301 | get_page(pPage);
|
---|
| 1302 | #else
|
---|
| 1303 | page_cache_get(pPage);
|
---|
| 1304 | #endif
|
---|
| 1305 | }
|
---|
| 1306 |
|
---|
| 1307 |
|
---|
[77529] | 1308 | /** Companion to vbsf_lock_user_pages(). */
|
---|
[78204] | 1309 | static void vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack)
|
---|
[77089] | 1310 | {
|
---|
[77549] | 1311 | /* We don't mark kernel pages dirty: */
|
---|
| 1312 | if (fLockPgHack)
|
---|
| 1313 | fSetDirty = false;
|
---|
| 1314 |
|
---|
[77089] | 1315 | while (cPages-- > 0)
|
---|
| 1316 | {
|
---|
[77549] | 1317 | struct page *pPage = papPages[cPages];
|
---|
[77966] | 1318 | Assert((ssize_t)cPages >= 0);
|
---|
[77549] | 1319 | if (fSetDirty && !PageReserved(pPage))
|
---|
[77967] | 1320 | set_page_dirty(pPage);
|
---|
[77549] | 1321 | vbsf_put_page(pPage);
|
---|
| 1322 | }
|
---|
| 1323 | }
|
---|
| 1324 |
|
---|
| 1325 |
|
---|
| 1326 | /**
|
---|
[77626] | 1327 | * Worker for vbsf_lock_user_pages_failed_check_kernel() and
|
---|
| 1328 | * vbsf_iter_lock_pages().
|
---|
| 1329 | */
|
---|
| 1330 | static int vbsf_lock_kernel_pages(uint8_t *pbStart, bool fWrite, size_t cPages, struct page **papPages)
|
---|
| 1331 | {
|
---|
| 1332 | uintptr_t const uPtrFrom = (uintptr_t)pbStart;
|
---|
| 1333 | uintptr_t const uPtrLast = (uPtrFrom & ~(uintptr_t)PAGE_OFFSET_MASK) + (cPages << PAGE_SHIFT) - 1;
|
---|
| 1334 | uint8_t *pbPage = (uint8_t *)uPtrLast;
|
---|
| 1335 | size_t iPage = cPages;
|
---|
| 1336 |
|
---|
| 1337 | /*
|
---|
| 1338 | * Touch the pages first (paranoia^2).
|
---|
| 1339 | */
|
---|
| 1340 | if (fWrite) {
|
---|
| 1341 | uint8_t volatile *pbProbe = (uint8_t volatile *)uPtrFrom;
|
---|
| 1342 | while (iPage-- > 0) {
|
---|
| 1343 | *pbProbe = *pbProbe;
|
---|
| 1344 | pbProbe += PAGE_SIZE;
|
---|
| 1345 | }
|
---|
| 1346 | } else {
|
---|
| 1347 | uint8_t const *pbProbe = (uint8_t const *)uPtrFrom;
|
---|
| 1348 | while (iPage-- > 0) {
|
---|
| 1349 | ASMProbeReadByte(pbProbe);
|
---|
| 1350 | pbProbe += PAGE_SIZE;
|
---|
| 1351 | }
|
---|
| 1352 | }
|
---|
| 1353 |
|
---|
| 1354 | /*
|
---|
| 1355 | * Get the pages.
|
---|
| 1356 | * Note! Fixes here probably applies to rtR0MemObjNativeLockKernel as well.
|
---|
| 1357 | */
|
---|
| 1358 | iPage = cPages;
|
---|
| 1359 | if ( uPtrFrom >= (unsigned long)__va(0)
|
---|
| 1360 | && uPtrLast < (unsigned long)high_memory) {
|
---|
| 1361 | /* The physical page mapping area: */
|
---|
| 1362 | while (iPage-- > 0) {
|
---|
| 1363 | struct page *pPage = papPages[iPage] = virt_to_page(pbPage);
|
---|
| 1364 | vbsf_get_page(pPage);
|
---|
| 1365 | pbPage -= PAGE_SIZE;
|
---|
| 1366 | }
|
---|
| 1367 | } else {
|
---|
| 1368 | /* This is vmalloc or some such thing, so go thru page tables: */
|
---|
| 1369 | while (iPage-- > 0) {
|
---|
| 1370 | struct page *pPage = rtR0MemObjLinuxVirtToPage(pbPage);
|
---|
| 1371 | if (pPage) {
|
---|
| 1372 | papPages[iPage] = pPage;
|
---|
| 1373 | vbsf_get_page(pPage);
|
---|
| 1374 | pbPage -= PAGE_SIZE;
|
---|
| 1375 | } else {
|
---|
| 1376 | while (++iPage < cPages) {
|
---|
| 1377 | pPage = papPages[iPage];
|
---|
| 1378 | vbsf_put_page(pPage);
|
---|
| 1379 | }
|
---|
| 1380 | return -EFAULT;
|
---|
| 1381 | }
|
---|
| 1382 | }
|
---|
| 1383 | }
|
---|
| 1384 | return 0;
|
---|
| 1385 | }
|
---|
| 1386 |
|
---|
| 1387 |
|
---|
| 1388 | /**
|
---|
[77549] | 1389 | * Catches kernel_read() and kernel_write() calls and works around them.
|
---|
| 1390 | *
|
---|
| 1391 | * The file_operations::read and file_operations::write callbacks supposedly
|
---|
| 1392 | * hands us the user buffers to read into and write out of. To allow the kernel
|
---|
| 1393 | * to read and write without allocating buffers in userland, they kernel_read()
|
---|
| 1394 | * and kernel_write() increases the user space address limit before calling us
|
---|
| 1395 | * so that copyin/copyout won't reject it. Our problem is that get_user_pages()
|
---|
| 1396 | * works on the userspace address space structures and will not be fooled by an
|
---|
| 1397 | * increased addr_limit.
|
---|
| 1398 | *
|
---|
| 1399 | * This code tries to detect this situation and fake get_user_lock() for the
|
---|
| 1400 | * kernel buffer.
|
---|
| 1401 | */
|
---|
| 1402 | static int vbsf_lock_user_pages_failed_check_kernel(uintptr_t uPtrFrom, size_t cPages, bool fWrite, int rcFailed,
|
---|
| 1403 | struct page **papPages, bool *pfLockPgHack)
|
---|
| 1404 | {
|
---|
| 1405 | /*
|
---|
| 1406 | * Check that this is valid user memory that is actually in the kernel range.
|
---|
| 1407 | */
|
---|
[87053] | 1408 | #if RTLNX_VER_MIN(5,10,0)
|
---|
[77549] | 1409 | if ( access_ok((void *)uPtrFrom, cPages << PAGE_SHIFT)
|
---|
[87053] | 1410 | && uPtrFrom >= TASK_SIZE_MAX)
|
---|
| 1411 | #elif RTLNX_VER_MIN(5,0,0) || RTLNX_RHEL_MIN(8,1)
|
---|
| 1412 | if ( access_ok((void *)uPtrFrom, cPages << PAGE_SHIFT)
|
---|
[77549] | 1413 | && uPtrFrom >= USER_DS.seg)
|
---|
[77089] | 1414 | #else
|
---|
[77549] | 1415 | if ( access_ok(fWrite ? VERIFY_WRITE : VERIFY_READ, (void *)uPtrFrom, cPages << PAGE_SHIFT)
|
---|
| 1416 | && uPtrFrom >= USER_DS.seg)
|
---|
[77089] | 1417 | #endif
|
---|
[77549] | 1418 | {
|
---|
[77626] | 1419 | int rc = vbsf_lock_kernel_pages((uint8_t *)uPtrFrom, fWrite, cPages, papPages);
|
---|
| 1420 | if (rc == 0) {
|
---|
| 1421 | *pfLockPgHack = true;
|
---|
| 1422 | return 0;
|
---|
[77549] | 1423 | }
|
---|
[77089] | 1424 | }
|
---|
[77549] | 1425 |
|
---|
| 1426 | return rcFailed;
|
---|
[77089] | 1427 | }
|
---|
| 1428 |
|
---|
| 1429 |
|
---|
| 1430 | /** Wrapper around get_user_pages. */
|
---|
[77549] | 1431 | DECLINLINE(int) vbsf_lock_user_pages(uintptr_t uPtrFrom, size_t cPages, bool fWrite, struct page **papPages, bool *pfLockPgHack)
|
---|
[77089] | 1432 | {
|
---|
[85693] | 1433 | # if RTLNX_VER_MIN(4,9,0) \
|
---|
| 1434 | || (defined(CONFIG_SUSE_KERNEL) && RTLNX_VER_RANGE(4,4,73, 4,4,74) /** @todo Figure out when & what exactly. */) \
|
---|
| 1435 | || (defined(CONFIG_SUSE_KERNEL) && RTLNX_VER_RANGE(4,4,75, 4,4,90) /** @todo Figure out when & what exactly. */) \
|
---|
| 1436 | || (defined(CONFIG_SUSE_KERNEL) && RTLNX_VER_RANGE(4,4,92, 4,5,0) /** @todo Figure out when & what exactly. */)
|
---|
[77526] | 1437 | ssize_t cPagesLocked = get_user_pages_unlocked(uPtrFrom, cPages, papPages,
|
---|
[77741] | 1438 | fWrite ? FOLL_WRITE | FOLL_FORCE : FOLL_FORCE);
|
---|
[85693] | 1439 | # elif RTLNX_VER_MIN(4,6,0)
|
---|
[77526] | 1440 | ssize_t cPagesLocked = get_user_pages_unlocked(uPtrFrom, cPages, fWrite, 1 /*force*/, papPages);
|
---|
[85693] | 1441 | # elif RTLNX_VER_RANGE(4,4,168, 4,5,0)
|
---|
[78135] | 1442 | ssize_t cPagesLocked = get_user_pages_unlocked(current, current->mm, uPtrFrom, cPages, papPages,
|
---|
| 1443 | fWrite ? FOLL_WRITE | FOLL_FORCE : FOLL_FORCE);
|
---|
[85693] | 1444 | # elif RTLNX_VER_MIN(4,0,0)
|
---|
[77549] | 1445 | ssize_t cPagesLocked = get_user_pages_unlocked(current, current->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages);
|
---|
[77089] | 1446 | # else
|
---|
[77526] | 1447 | struct task_struct *pTask = current;
|
---|
[77966] | 1448 | ssize_t cPagesLocked;
|
---|
[77526] | 1449 | down_read(&pTask->mm->mmap_sem);
|
---|
[77873] | 1450 | cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages, NULL);
|
---|
[77526] | 1451 | up_read(&pTask->mm->mmap_sem);
|
---|
[77089] | 1452 | # endif
|
---|
[77549] | 1453 | *pfLockPgHack = false;
|
---|
[77526] | 1454 | if (cPagesLocked == cPages)
|
---|
| 1455 | return 0;
|
---|
[77549] | 1456 |
|
---|
| 1457 | /*
|
---|
| 1458 | * It failed.
|
---|
| 1459 | */
|
---|
[77526] | 1460 | if (cPagesLocked < 0)
|
---|
[77549] | 1461 | return vbsf_lock_user_pages_failed_check_kernel(uPtrFrom, cPages, fWrite, (int)cPagesLocked, papPages, pfLockPgHack);
|
---|
[77089] | 1462 |
|
---|
[77549] | 1463 | vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/);
|
---|
[77089] | 1464 |
|
---|
[77526] | 1465 | /* We could use uPtrFrom + cPagesLocked to get the correct status here... */
|
---|
| 1466 | return -EFAULT;
|
---|
[77089] | 1467 | }
|
---|
| 1468 |
|
---|
[88716] | 1469 | #if RTLNX_VER_MAX(5,10,0) /* No regular .read/.write for 5.10, only .read_iter/.write_iter or in-kernel reads/writes fail. */
|
---|
[77089] | 1470 |
|
---|
[30175] | 1471 | /**
|
---|
[77419] | 1472 | * Read function used when accessing files that are memory mapped.
|
---|
| 1473 | *
|
---|
| 1474 | * We read from the page cache here to present the a cohertent picture of the
|
---|
| 1475 | * the file content.
|
---|
| 1476 | */
|
---|
[77529] | 1477 | static ssize_t vbsf_reg_read_mapped(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off)
|
---|
[77419] | 1478 | {
|
---|
[88571] | 1479 | # if RTLNX_VER_MIN(3,16,0)
|
---|
[77526] | 1480 | struct iovec iov = { .iov_base = buf, .iov_len = size };
|
---|
| 1481 | struct iov_iter iter;
|
---|
| 1482 | struct kiocb kiocb;
|
---|
| 1483 | ssize_t cbRet;
|
---|
[77419] | 1484 |
|
---|
[77526] | 1485 | init_sync_kiocb(&kiocb, file);
|
---|
| 1486 | kiocb.ki_pos = *off;
|
---|
| 1487 | iov_iter_init(&iter, READ, &iov, 1, size);
|
---|
[77419] | 1488 |
|
---|
[77526] | 1489 | cbRet = generic_file_read_iter(&kiocb, &iter);
|
---|
[77419] | 1490 |
|
---|
[77526] | 1491 | *off = kiocb.ki_pos;
|
---|
| 1492 | return cbRet;
|
---|
[77419] | 1493 |
|
---|
[88571] | 1494 | # elif RTLNX_VER_MIN(2,6,19)
|
---|
[77526] | 1495 | struct iovec iov = { .iov_base = buf, .iov_len = size };
|
---|
| 1496 | struct kiocb kiocb;
|
---|
| 1497 | ssize_t cbRet;
|
---|
[77419] | 1498 |
|
---|
[77526] | 1499 | init_sync_kiocb(&kiocb, file);
|
---|
| 1500 | kiocb.ki_pos = *off;
|
---|
[77419] | 1501 |
|
---|
[77526] | 1502 | cbRet = generic_file_aio_read(&kiocb, &iov, 1, *off);
|
---|
| 1503 | if (cbRet == -EIOCBQUEUED)
|
---|
| 1504 | cbRet = wait_on_sync_kiocb(&kiocb);
|
---|
[77419] | 1505 |
|
---|
[77526] | 1506 | *off = kiocb.ki_pos;
|
---|
| 1507 | return cbRet;
|
---|
[77419] | 1508 |
|
---|
[88571] | 1509 | # else /* 2.6.18 or earlier: */
|
---|
[77526] | 1510 | return generic_file_read(file, buf, size, off);
|
---|
[88571] | 1511 | # endif
|
---|
[77419] | 1512 | }
|
---|
| 1513 |
|
---|
| 1514 |
|
---|
| 1515 | /**
|
---|
[77529] | 1516 | * Fallback case of vbsf_reg_read() that locks the user buffers and let the host
|
---|
[77138] | 1517 | * write directly to them.
|
---|
| 1518 | */
|
---|
[77549] | 1519 | static ssize_t vbsf_reg_read_locking(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off,
|
---|
[77951] | 1520 | struct vbsf_super_info *pSuperInfo, struct vbsf_reg_info *sf_r)
|
---|
[77138] | 1521 | {
|
---|
[77526] | 1522 | /*
|
---|
| 1523 | * Lock pages and execute the read, taking care not to pass the host
|
---|
| 1524 | * more than it can handle in one go or more than we care to allocate
|
---|
| 1525 | * page arrays for. The latter limit is set at just short of 32KB due
|
---|
| 1526 | * to how the physical heap works.
|
---|
| 1527 | */
|
---|
| 1528 | struct page *apPagesStack[16];
|
---|
| 1529 | struct page **papPages = &apPagesStack[0];
|
---|
| 1530 | struct page **papPagesFree = NULL;
|
---|
| 1531 | VBOXSFREADPGLSTREQ *pReq;
|
---|
| 1532 | loff_t offFile = *off;
|
---|
| 1533 | ssize_t cbRet = -ENOMEM;
|
---|
| 1534 | size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
|
---|
[77951] | 1535 | size_t cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 1), cPages);
|
---|
[77549] | 1536 | bool fLockPgHack;
|
---|
[77138] | 1537 |
|
---|
[77526] | 1538 | pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
|
---|
| 1539 | while (!pReq && cMaxPages > 4) {
|
---|
| 1540 | cMaxPages /= 2;
|
---|
| 1541 | pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
|
---|
| 1542 | }
|
---|
[77626] | 1543 | if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
|
---|
[77526] | 1544 | papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL);
|
---|
| 1545 | if (pReq && papPages) {
|
---|
| 1546 | cbRet = 0;
|
---|
| 1547 | for (;;) {
|
---|
| 1548 | /*
|
---|
| 1549 | * Figure out how much to process now and lock the user pages.
|
---|
| 1550 | */
|
---|
| 1551 | int rc;
|
---|
| 1552 | size_t cbChunk = (uintptr_t)buf & PAGE_OFFSET_MASK;
|
---|
| 1553 | pReq->PgLst.offFirstPage = (uint16_t)cbChunk;
|
---|
| 1554 | cPages = RT_ALIGN_Z(cbChunk + size, PAGE_SIZE) >> PAGE_SHIFT;
|
---|
| 1555 | if (cPages <= cMaxPages)
|
---|
| 1556 | cbChunk = size;
|
---|
| 1557 | else {
|
---|
| 1558 | cPages = cMaxPages;
|
---|
| 1559 | cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk;
|
---|
| 1560 | }
|
---|
[77138] | 1561 |
|
---|
[77549] | 1562 | rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, true /*fWrite*/, papPages, &fLockPgHack);
|
---|
[77526] | 1563 | if (rc == 0) {
|
---|
| 1564 | size_t iPage = cPages;
|
---|
| 1565 | while (iPage-- > 0)
|
---|
| 1566 | pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
|
---|
| 1567 | } else {
|
---|
| 1568 | cbRet = rc;
|
---|
| 1569 | break;
|
---|
| 1570 | }
|
---|
[77138] | 1571 |
|
---|
[77526] | 1572 | /*
|
---|
| 1573 | * Issue the request and unlock the pages.
|
---|
| 1574 | */
|
---|
[77951] | 1575 | rc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
|
---|
[77138] | 1576 |
|
---|
[77966] | 1577 | Assert(cPages <= cMaxPages);
|
---|
[77549] | 1578 | vbsf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/, fLockPgHack);
|
---|
[77138] | 1579 |
|
---|
[77526] | 1580 | if (RT_SUCCESS(rc)) {
|
---|
| 1581 | /*
|
---|
| 1582 | * Success, advance position and buffer.
|
---|
| 1583 | */
|
---|
| 1584 | uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
|
---|
| 1585 | AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
|
---|
| 1586 | cbRet += cbActual;
|
---|
| 1587 | offFile += cbActual;
|
---|
| 1588 | buf = (uint8_t *)buf + cbActual;
|
---|
| 1589 | size -= cbActual;
|
---|
[77138] | 1590 |
|
---|
[77526] | 1591 | /*
|
---|
| 1592 | * Are we done already? If so commit the new file offset.
|
---|
| 1593 | */
|
---|
| 1594 | if (!size || cbActual < cbChunk) {
|
---|
| 1595 | *off = offFile;
|
---|
| 1596 | break;
|
---|
| 1597 | }
|
---|
| 1598 | } else if (rc == VERR_NO_MEMORY && cMaxPages > 4) {
|
---|
| 1599 | /*
|
---|
| 1600 | * The host probably doesn't have enough heap to handle the
|
---|
| 1601 | * request, reduce the page count and retry.
|
---|
| 1602 | */
|
---|
| 1603 | cMaxPages /= 4;
|
---|
| 1604 | Assert(cMaxPages > 0);
|
---|
| 1605 | } else {
|
---|
| 1606 | /*
|
---|
| 1607 | * If we've successfully read stuff, return it rather than
|
---|
| 1608 | * the error. (Not sure if this is such a great idea...)
|
---|
| 1609 | */
|
---|
[77967] | 1610 | if (cbRet > 0) {
|
---|
| 1611 | SFLOGFLOW(("vbsf_reg_read: read at %#RX64 -> %Rrc; got cbRet=%#zx already\n", offFile, rc, cbRet));
|
---|
[77526] | 1612 | *off = offFile;
|
---|
[77967] | 1613 | } else {
|
---|
| 1614 | SFLOGFLOW(("vbsf_reg_read: read at %#RX64 -> %Rrc\n", offFile, rc));
|
---|
[77526] | 1615 | cbRet = -EPROTO;
|
---|
[77967] | 1616 | }
|
---|
[77526] | 1617 | break;
|
---|
| 1618 | }
|
---|
| 1619 | }
|
---|
| 1620 | }
|
---|
| 1621 | if (papPagesFree)
|
---|
| 1622 | kfree(papPages);
|
---|
| 1623 | if (pReq)
|
---|
| 1624 | VbglR0PhysHeapFree(pReq);
|
---|
[77966] | 1625 | SFLOGFLOW(("vbsf_reg_read: returns %zd (%#zx), *off=%RX64 [lock]\n", cbRet, cbRet, *off));
|
---|
[77526] | 1626 | return cbRet;
|
---|
[77138] | 1627 | }
|
---|
| 1628 |
|
---|
[88716] | 1629 |
|
---|
[77138] | 1630 | /**
|
---|
[30175] | 1631 | * Read from a regular file.
|
---|
| 1632 | *
|
---|
| 1633 | * @param file the file
|
---|
| 1634 | * @param buf the buffer
|
---|
| 1635 | * @param size length of the buffer
|
---|
[77064] | 1636 | * @param off offset within the file (in/out).
|
---|
[30175] | 1637 | * @returns the number of read bytes on success, Linux error code otherwise
|
---|
| 1638 | */
|
---|
[77529] | 1639 | static ssize_t vbsf_reg_read(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off)
|
---|
[2614] | 1640 | {
|
---|
[77951] | 1641 | struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
|
---|
| 1642 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
| 1643 | struct vbsf_reg_info *sf_r = file->private_data;
|
---|
| 1644 | struct address_space *mapping = inode->i_mapping;
|
---|
[2614] | 1645 |
|
---|
[77529] | 1646 | SFLOGFLOW(("vbsf_reg_read: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off));
|
---|
[77458] | 1647 |
|
---|
[77526] | 1648 | if (!S_ISREG(inode->i_mode)) {
|
---|
| 1649 | LogFunc(("read from non regular file %d\n", inode->i_mode));
|
---|
| 1650 | return -EINVAL;
|
---|
| 1651 | }
|
---|
[2614] | 1652 |
|
---|
[77526] | 1653 | /** @todo XXX Check read permission according to inode->i_mode! */
|
---|
[11507] | 1654 |
|
---|
[77526] | 1655 | if (!size)
|
---|
| 1656 | return 0;
|
---|
[2614] | 1657 |
|
---|
[77526] | 1658 | /*
|
---|
| 1659 | * If there is a mapping and O_DIRECT isn't in effect, we must at a
|
---|
| 1660 | * heed dirty pages in the mapping and read from them. For simplicity
|
---|
| 1661 | * though, we just do page cache reading when there are writable
|
---|
| 1662 | * mappings around with any kind of pages loaded.
|
---|
| 1663 | */
|
---|
[77951] | 1664 | if (vbsf_should_use_cached_read(file, mapping, pSuperInfo))
|
---|
[77529] | 1665 | return vbsf_reg_read_mapped(file, buf, size, off);
|
---|
[77419] | 1666 |
|
---|
[77526] | 1667 | /*
|
---|
| 1668 | * For small requests, try use an embedded buffer provided we get a heap block
|
---|
| 1669 | * that does not cross page boundraries (see host code).
|
---|
| 1670 | */
|
---|
| 1671 | if (size <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) /* see allocator */) {
|
---|
| 1672 | uint32_t const cbReq = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) + size;
|
---|
| 1673 | VBOXSFREADEMBEDDEDREQ *pReq = (VBOXSFREADEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
|
---|
[77626] | 1674 | if (pReq) {
|
---|
| 1675 | if ((PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
|
---|
| 1676 | ssize_t cbRet;
|
---|
[77951] | 1677 | int vrc = VbglR0SfHostReqReadEmbedded(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, *off, (uint32_t)size);
|
---|
[77626] | 1678 | if (RT_SUCCESS(vrc)) {
|
---|
| 1679 | cbRet = pReq->Parms.cb32Read.u.value32;
|
---|
| 1680 | AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
|
---|
| 1681 | if (copy_to_user(buf, pReq->abData, cbRet) == 0)
|
---|
| 1682 | *off += cbRet;
|
---|
| 1683 | else
|
---|
| 1684 | cbRet = -EFAULT;
|
---|
| 1685 | } else
|
---|
| 1686 | cbRet = -EPROTO;
|
---|
| 1687 | VbglR0PhysHeapFree(pReq);
|
---|
[77966] | 1688 | SFLOGFLOW(("vbsf_reg_read: returns %zd (%#zx), *off=%RX64 [embed]\n", cbRet, cbRet, *off));
|
---|
[77626] | 1689 | return cbRet;
|
---|
| 1690 | }
|
---|
[77526] | 1691 | VbglR0PhysHeapFree(pReq);
|
---|
| 1692 | }
|
---|
| 1693 | }
|
---|
[77089] | 1694 |
|
---|
[88571] | 1695 | # if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
|
---|
[77526] | 1696 | /*
|
---|
| 1697 | * For medium sized requests try use a bounce buffer.
|
---|
| 1698 | */
|
---|
| 1699 | if (size <= _64K /** @todo make this configurable? */) {
|
---|
| 1700 | void *pvBounce = kmalloc(size, GFP_KERNEL);
|
---|
| 1701 | if (pvBounce) {
|
---|
| 1702 | VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
|
---|
| 1703 | if (pReq) {
|
---|
| 1704 | ssize_t cbRet;
|
---|
[77951] | 1705 | int vrc = VbglR0SfHostReqReadContig(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, *off,
|
---|
[77526] | 1706 | (uint32_t)size, pvBounce, virt_to_phys(pvBounce));
|
---|
| 1707 | if (RT_SUCCESS(vrc)) {
|
---|
| 1708 | cbRet = pReq->Parms.cb32Read.u.value32;
|
---|
| 1709 | AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
|
---|
| 1710 | if (copy_to_user(buf, pvBounce, cbRet) == 0)
|
---|
| 1711 | *off += cbRet;
|
---|
| 1712 | else
|
---|
| 1713 | cbRet = -EFAULT;
|
---|
| 1714 | } else
|
---|
| 1715 | cbRet = -EPROTO;
|
---|
| 1716 | VbglR0PhysHeapFree(pReq);
|
---|
| 1717 | kfree(pvBounce);
|
---|
[77966] | 1718 | SFLOGFLOW(("vbsf_reg_read: returns %zd (%#zx), *off=%RX64 [bounce]\n", cbRet, cbRet, *off));
|
---|
[77526] | 1719 | return cbRet;
|
---|
| 1720 | }
|
---|
| 1721 | kfree(pvBounce);
|
---|
| 1722 | }
|
---|
| 1723 | }
|
---|
[88571] | 1724 | # endif
|
---|
[77089] | 1725 |
|
---|
[77951] | 1726 | return vbsf_reg_read_locking(file, buf, size, off, pSuperInfo, sf_r);
|
---|
[2614] | 1727 | }
|
---|
[88716] | 1728 |
|
---|
[88571] | 1729 | #endif /* < 5.10.0 */
|
---|
[2614] | 1730 |
|
---|
[30175] | 1731 | /**
|
---|
[77770] | 1732 | * Helper the synchronizes the page cache content with something we just wrote
|
---|
| 1733 | * to the host.
|
---|
[77443] | 1734 | */
|
---|
[77943] | 1735 | static void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
|
---|
| 1736 | uint8_t const *pbSrcBuf, struct page **papSrcPages,
|
---|
| 1737 | uint32_t offSrcPage, size_t cSrcPages)
|
---|
[77443] | 1738 | {
|
---|
[77853] | 1739 | Assert(offSrcPage < PAGE_SIZE);
|
---|
[77770] | 1740 | if (mapping && mapping->nrpages > 0) {
|
---|
| 1741 | /*
|
---|
| 1742 | * Work the pages in the write range.
|
---|
| 1743 | */
|
---|
| 1744 | while (cbRange > 0) {
|
---|
| 1745 | /*
|
---|
| 1746 | * Lookup the page at offFile. We're fine if there aren't
|
---|
| 1747 | * any there. We're skip if it's dirty or is being written
|
---|
| 1748 | * back, at least for now.
|
---|
| 1749 | */
|
---|
| 1750 | size_t const offDstPage = offFile & PAGE_OFFSET_MASK;
|
---|
| 1751 | size_t const cbToCopy = RT_MIN(PAGE_SIZE - offDstPage, cbRange);
|
---|
| 1752 | pgoff_t const idxPage = offFile >> PAGE_SHIFT;
|
---|
| 1753 | struct page *pDstPage = find_lock_page(mapping, idxPage);
|
---|
| 1754 | if (pDstPage) {
|
---|
| 1755 | if ( pDstPage->mapping == mapping /* ignore if re-purposed (paranoia) */
|
---|
| 1756 | && pDstPage->index == idxPage
|
---|
| 1757 | && !PageDirty(pDstPage) /* ignore if dirty */
|
---|
| 1758 | && !PageWriteback(pDstPage) /* ignore if being written back */ ) {
|
---|
| 1759 | /*
|
---|
| 1760 | * Map the page and do the copying.
|
---|
| 1761 | */
|
---|
| 1762 | uint8_t *pbDst = (uint8_t *)kmap(pDstPage);
|
---|
| 1763 | if (pbSrcBuf)
|
---|
| 1764 | memcpy(&pbDst[offDstPage], pbSrcBuf, cbToCopy);
|
---|
| 1765 | else {
|
---|
| 1766 | uint32_t const cbSrc0 = PAGE_SIZE - offSrcPage;
|
---|
| 1767 | uint8_t const *pbSrc = (uint8_t const *)kmap(papSrcPages[0]);
|
---|
[77853] | 1768 | AssertMsg(cSrcPages >= 1, ("offFile=%#llx cbRange=%#zx cbToCopy=%#zx\n", offFile, cbRange, cbToCopy));
|
---|
[77770] | 1769 | memcpy(&pbDst[offDstPage], &pbSrc[offSrcPage], RT_MIN(cbToCopy, cbSrc0));
|
---|
| 1770 | kunmap(papSrcPages[0]);
|
---|
| 1771 | if (cbToCopy > cbSrc0) {
|
---|
[77853] | 1772 | AssertMsg(cSrcPages >= 2, ("offFile=%#llx cbRange=%#zx cbToCopy=%#zx\n", offFile, cbRange, cbToCopy));
|
---|
[77770] | 1773 | pbSrc = (uint8_t const *)kmap(papSrcPages[1]);
|
---|
| 1774 | memcpy(&pbDst[offDstPage + cbSrc0], pbSrc, cbToCopy - cbSrc0);
|
---|
| 1775 | kunmap(papSrcPages[1]);
|
---|
| 1776 | }
|
---|
| 1777 | }
|
---|
| 1778 | kunmap(pDstPage);
|
---|
| 1779 | flush_dcache_page(pDstPage);
|
---|
| 1780 | if (cbToCopy == PAGE_SIZE)
|
---|
| 1781 | SetPageUptodate(pDstPage);
|
---|
[85698] | 1782 | # if RTLNX_VER_MIN(2,4,10)
|
---|
[77770] | 1783 | mark_page_accessed(pDstPage);
|
---|
[77443] | 1784 | # endif
|
---|
[77770] | 1785 | } else
|
---|
| 1786 | SFLOGFLOW(("vbsf_reg_write_sync_page_cache: Skipping page %p: mapping=%p (vs %p) writeback=%d offset=%#lx (vs%#lx)\n",
|
---|
| 1787 | pDstPage, pDstPage->mapping, mapping, PageWriteback(pDstPage), pDstPage->index, idxPage));
|
---|
| 1788 | unlock_page(pDstPage);
|
---|
| 1789 | vbsf_put_page(pDstPage);
|
---|
| 1790 | }
|
---|
| 1791 |
|
---|
| 1792 | /*
|
---|
| 1793 | * Advance.
|
---|
| 1794 | */
|
---|
| 1795 | if (pbSrcBuf)
|
---|
| 1796 | pbSrcBuf += cbToCopy;
|
---|
| 1797 | else
|
---|
| 1798 | {
|
---|
| 1799 | offSrcPage += cbToCopy;
|
---|
[77853] | 1800 | Assert(offSrcPage < PAGE_SIZE * 2);
|
---|
[77770] | 1801 | if (offSrcPage >= PAGE_SIZE) {
|
---|
| 1802 | offSrcPage &= PAGE_OFFSET_MASK;
|
---|
| 1803 | papSrcPages++;
|
---|
[77853] | 1804 | # ifdef VBOX_STRICT
|
---|
| 1805 | Assert(cSrcPages > 0);
|
---|
| 1806 | cSrcPages--;
|
---|
| 1807 | # endif
|
---|
[77770] | 1808 | }
|
---|
| 1809 | }
|
---|
[77853] | 1810 | offFile += cbToCopy;
|
---|
| 1811 | cbRange -= cbToCopy;
|
---|
[77770] | 1812 | }
|
---|
| 1813 | }
|
---|
[77853] | 1814 | RT_NOREF(cSrcPages);
|
---|
[77443] | 1815 | }
|
---|
| 1816 |
|
---|
[88716] | 1817 | #if RTLNX_VER_MAX(5,10,0) /* No regular .read/.write for 5.10, only .read_iter/.write_iter or in-kernel reads/writes fail. */
|
---|
[77443] | 1818 |
|
---|
| 1819 | /**
|
---|
[77529] | 1820 | * Fallback case of vbsf_reg_write() that locks the user buffers and let the host
|
---|
[77141] | 1821 | * write directly to them.
|
---|
| 1822 | */
|
---|
[77549] | 1823 | static ssize_t vbsf_reg_write_locking(struct file *file, const char /*__user*/ *buf, size_t size, loff_t *off, loff_t offFile,
|
---|
| 1824 | struct inode *inode, struct vbsf_inode_info *sf_i,
|
---|
[77951] | 1825 | struct vbsf_super_info *pSuperInfo, struct vbsf_reg_info *sf_r)
|
---|
[77141] | 1826 | {
|
---|
[77526] | 1827 | /*
|
---|
| 1828 | * Lock pages and execute the write, taking care not to pass the host
|
---|
| 1829 | * more than it can handle in one go or more than we care to allocate
|
---|
| 1830 | * page arrays for. The latter limit is set at just short of 32KB due
|
---|
| 1831 | * to how the physical heap works.
|
---|
| 1832 | */
|
---|
| 1833 | struct page *apPagesStack[16];
|
---|
| 1834 | struct page **papPages = &apPagesStack[0];
|
---|
| 1835 | struct page **papPagesFree = NULL;
|
---|
| 1836 | VBOXSFWRITEPGLSTREQ *pReq;
|
---|
| 1837 | ssize_t cbRet = -ENOMEM;
|
---|
| 1838 | size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
|
---|
[77951] | 1839 | size_t cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 1), cPages);
|
---|
[77549] | 1840 | bool fLockPgHack;
|
---|
[77141] | 1841 |
|
---|
[77526] | 1842 | pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
|
---|
| 1843 | while (!pReq && cMaxPages > 4) {
|
---|
| 1844 | cMaxPages /= 2;
|
---|
| 1845 | pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
|
---|
| 1846 | }
|
---|
[77626] | 1847 | if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
|
---|
[77526] | 1848 | papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL);
|
---|
| 1849 | if (pReq && papPages) {
|
---|
| 1850 | cbRet = 0;
|
---|
| 1851 | for (;;) {
|
---|
| 1852 | /*
|
---|
| 1853 | * Figure out how much to process now and lock the user pages.
|
---|
| 1854 | */
|
---|
| 1855 | int rc;
|
---|
| 1856 | size_t cbChunk = (uintptr_t)buf & PAGE_OFFSET_MASK;
|
---|
| 1857 | pReq->PgLst.offFirstPage = (uint16_t)cbChunk;
|
---|
| 1858 | cPages = RT_ALIGN_Z(cbChunk + size, PAGE_SIZE) >> PAGE_SHIFT;
|
---|
| 1859 | if (cPages <= cMaxPages)
|
---|
| 1860 | cbChunk = size;
|
---|
| 1861 | else {
|
---|
| 1862 | cPages = cMaxPages;
|
---|
| 1863 | cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk;
|
---|
| 1864 | }
|
---|
[77141] | 1865 |
|
---|
[77549] | 1866 | rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, false /*fWrite*/, papPages, &fLockPgHack);
|
---|
[77526] | 1867 | if (rc == 0) {
|
---|
| 1868 | size_t iPage = cPages;
|
---|
| 1869 | while (iPage-- > 0)
|
---|
| 1870 | pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
|
---|
| 1871 | } else {
|
---|
| 1872 | cbRet = rc;
|
---|
| 1873 | break;
|
---|
| 1874 | }
|
---|
[77141] | 1875 |
|
---|
[77526] | 1876 | /*
|
---|
| 1877 | * Issue the request and unlock the pages.
|
---|
| 1878 | */
|
---|
[77951] | 1879 | rc = VbglR0SfHostReqWritePgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
|
---|
[77959] | 1880 | sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
|
---|
[77526] | 1881 | if (RT_SUCCESS(rc)) {
|
---|
| 1882 | /*
|
---|
| 1883 | * Success, advance position and buffer.
|
---|
| 1884 | */
|
---|
| 1885 | uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
|
---|
| 1886 | AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
|
---|
[77770] | 1887 |
|
---|
| 1888 | vbsf_reg_write_sync_page_cache(inode->i_mapping, offFile, cbActual, NULL /*pbKrnlBuf*/,
|
---|
[77853] | 1889 | papPages, (uintptr_t)buf & PAGE_OFFSET_MASK, cPages);
|
---|
[77966] | 1890 | Assert(cPages <= cMaxPages);
|
---|
[77770] | 1891 | vbsf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/, fLockPgHack);
|
---|
| 1892 |
|
---|
[77526] | 1893 | cbRet += cbActual;
|
---|
| 1894 | buf = (uint8_t *)buf + cbActual;
|
---|
| 1895 | size -= cbActual;
|
---|
[77976] | 1896 |
|
---|
| 1897 | offFile += cbActual;
|
---|
| 1898 | if ((file->f_flags & O_APPEND) && (g_fSfFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET))
|
---|
| 1899 | offFile = pReq->Parms.off64Write.u.value64;
|
---|
[77526] | 1900 | if (offFile > i_size_read(inode))
|
---|
| 1901 | i_size_write(inode, offFile);
|
---|
[77976] | 1902 |
|
---|
[77631] | 1903 | sf_i->force_restat = 1; /* mtime (and size) may have changed */
|
---|
[77141] | 1904 |
|
---|
[77526] | 1905 | /*
|
---|
| 1906 | * Are we done already? If so commit the new file offset.
|
---|
| 1907 | */
|
---|
| 1908 | if (!size || cbActual < cbChunk) {
|
---|
| 1909 | *off = offFile;
|
---|
| 1910 | break;
|
---|
| 1911 | }
|
---|
| 1912 | } else {
|
---|
[77770] | 1913 | vbsf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/, fLockPgHack);
|
---|
| 1914 | if (rc == VERR_NO_MEMORY && cMaxPages > 4) {
|
---|
| 1915 | /*
|
---|
| 1916 | * The host probably doesn't have enough heap to handle the
|
---|
| 1917 | * request, reduce the page count and retry.
|
---|
| 1918 | */
|
---|
| 1919 | cMaxPages /= 4;
|
---|
| 1920 | Assert(cMaxPages > 0);
|
---|
| 1921 | } else {
|
---|
| 1922 | /*
|
---|
| 1923 | * If we've successfully written stuff, return it rather than
|
---|
| 1924 | * the error. (Not sure if this is such a great idea...)
|
---|
| 1925 | */
|
---|
[77967] | 1926 | if (cbRet > 0) {
|
---|
| 1927 | SFLOGFLOW(("vbsf_reg_write: write at %#RX64 -> %Rrc; got cbRet=%#zx already\n", offFile, rc, cbRet));
|
---|
[77770] | 1928 | *off = offFile;
|
---|
[77967] | 1929 | } else {
|
---|
| 1930 | SFLOGFLOW(("vbsf_reg_write: write at %#RX64 -> %Rrc\n", offFile, rc));
|
---|
[77770] | 1931 | cbRet = -EPROTO;
|
---|
[77967] | 1932 | }
|
---|
[77770] | 1933 | break;
|
---|
| 1934 | }
|
---|
[77526] | 1935 | }
|
---|
| 1936 | }
|
---|
| 1937 | }
|
---|
| 1938 | if (papPagesFree)
|
---|
| 1939 | kfree(papPages);
|
---|
| 1940 | if (pReq)
|
---|
| 1941 | VbglR0PhysHeapFree(pReq);
|
---|
[77966] | 1942 | SFLOGFLOW(("vbsf_reg_write: returns %zd (%#zx), *off=%RX64 [lock]\n", cbRet, cbRet, *off));
|
---|
[77526] | 1943 | return cbRet;
|
---|
[77141] | 1944 | }
|
---|
| 1945 |
|
---|
[88716] | 1946 |
|
---|
[77141] | 1947 | /**
|
---|
[30175] | 1948 | * Write to a regular file.
|
---|
| 1949 | *
|
---|
| 1950 | * @param file the file
|
---|
| 1951 | * @param buf the buffer
|
---|
| 1952 | * @param size length of the buffer
|
---|
| 1953 | * @param off offset within the file
|
---|
| 1954 | * @returns the number of written bytes on success, Linux error code otherwise
|
---|
| 1955 | */
|
---|
[77529] | 1956 | static ssize_t vbsf_reg_write(struct file *file, const char *buf, size_t size, loff_t * off)
|
---|
[2614] | 1957 | {
|
---|
[77951] | 1958 | struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
|
---|
| 1959 | struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
|
---|
| 1960 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
| 1961 | struct vbsf_reg_info *sf_r = file->private_data;
|
---|
| 1962 | struct address_space *mapping = inode->i_mapping;
|
---|
[77526] | 1963 | loff_t pos;
|
---|
[2614] | 1964 |
|
---|
[77529] | 1965 | SFLOGFLOW(("vbsf_reg_write: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off));
|
---|
[77951] | 1966 | Assert(sf_i);
|
---|
| 1967 | Assert(pSuperInfo);
|
---|
| 1968 | Assert(sf_r);
|
---|
[77626] | 1969 | AssertReturn(S_ISREG(inode->i_mode), -EINVAL);
|
---|
[2614] | 1970 |
|
---|
[77526] | 1971 | pos = *off;
|
---|
| 1972 | if (file->f_flags & O_APPEND)
|
---|
| 1973 | pos = i_size_read(inode);
|
---|
[16466] | 1974 |
|
---|
[77526] | 1975 | /** @todo XXX Check write permission according to inode->i_mode! */
|
---|
[11507] | 1976 |
|
---|
[77526] | 1977 | if (!size) {
|
---|
| 1978 | if (file->f_flags & O_APPEND) /** @todo check if this is the consensus behavior... */
|
---|
| 1979 | *off = pos;
|
---|
| 1980 | return 0;
|
---|
| 1981 | }
|
---|
[2614] | 1982 |
|
---|
[77953] | 1983 | /** @todo Implement the read-write caching mode. */
|
---|
| 1984 |
|
---|
[77526] | 1985 | /*
|
---|
| 1986 | * If there are active writable mappings, coordinate with any
|
---|
| 1987 | * pending writes via those.
|
---|
| 1988 | */
|
---|
| 1989 | if ( mapping
|
---|
| 1990 | && mapping->nrpages > 0
|
---|
| 1991 | && mapping_writably_mapped(mapping)) {
|
---|
[88571] | 1992 | # if RTLNX_VER_MIN(2,6,32)
|
---|
[77526] | 1993 | int err = filemap_fdatawait_range(mapping, pos, pos + size - 1);
|
---|
| 1994 | if (err)
|
---|
| 1995 | return err;
|
---|
[88571] | 1996 | # else
|
---|
[77526] | 1997 | /** @todo ... */
|
---|
[88571] | 1998 | # endif
|
---|
[77526] | 1999 | }
|
---|
[77443] | 2000 |
|
---|
[77526] | 2001 | /*
|
---|
| 2002 | * For small requests, try use an embedded buffer provided we get a heap block
|
---|
| 2003 | * that does not cross page boundraries (see host code).
|
---|
| 2004 | */
|
---|
| 2005 | if (size <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) /* see allocator */) {
|
---|
| 2006 | uint32_t const cbReq = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + size;
|
---|
| 2007 | VBOXSFWRITEEMBEDDEDREQ *pReq = (VBOXSFWRITEEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
|
---|
| 2008 | if ( pReq
|
---|
| 2009 | && (PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
|
---|
| 2010 | ssize_t cbRet;
|
---|
| 2011 | if (copy_from_user(pReq->abData, buf, size) == 0) {
|
---|
[77951] | 2012 | int vrc = VbglR0SfHostReqWriteEmbedded(pSuperInfo->map.root, pReq, sf_r->Handle.hHost,
|
---|
[77631] | 2013 | pos, (uint32_t)size);
|
---|
[77959] | 2014 | sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
|
---|
[77526] | 2015 | if (RT_SUCCESS(vrc)) {
|
---|
| 2016 | cbRet = pReq->Parms.cb32Write.u.value32;
|
---|
| 2017 | AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
|
---|
[77770] | 2018 | vbsf_reg_write_sync_page_cache(mapping, pos, (uint32_t)cbRet, pReq->abData,
|
---|
[77853] | 2019 | NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/);
|
---|
[77526] | 2020 | pos += cbRet;
|
---|
[77976] | 2021 | if ((file->f_flags & O_APPEND) && (g_fSfFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET))
|
---|
| 2022 | pos = pReq->Parms.off64Write.u.value64;
|
---|
[77526] | 2023 | *off = pos;
|
---|
| 2024 | if (pos > i_size_read(inode))
|
---|
| 2025 | i_size_write(inode, pos);
|
---|
| 2026 | } else
|
---|
| 2027 | cbRet = -EPROTO;
|
---|
| 2028 | sf_i->force_restat = 1; /* mtime (and size) may have changed */
|
---|
| 2029 | } else
|
---|
| 2030 | cbRet = -EFAULT;
|
---|
[77141] | 2031 |
|
---|
[77526] | 2032 | VbglR0PhysHeapFree(pReq);
|
---|
[77966] | 2033 | SFLOGFLOW(("vbsf_reg_write: returns %zd (%#zx), *off=%RX64 [embed]\n", cbRet, cbRet, *off));
|
---|
[77526] | 2034 | return cbRet;
|
---|
| 2035 | }
|
---|
| 2036 | if (pReq)
|
---|
| 2037 | VbglR0PhysHeapFree(pReq);
|
---|
| 2038 | }
|
---|
[77141] | 2039 |
|
---|
[88571] | 2040 | # if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
|
---|
[77526] | 2041 | /*
|
---|
| 2042 | * For medium sized requests try use a bounce buffer.
|
---|
| 2043 | */
|
---|
| 2044 | if (size <= _64K /** @todo make this configurable? */) {
|
---|
| 2045 | void *pvBounce = kmalloc(size, GFP_KERNEL);
|
---|
| 2046 | if (pvBounce) {
|
---|
| 2047 | if (copy_from_user(pvBounce, buf, size) == 0) {
|
---|
| 2048 | VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
|
---|
| 2049 | if (pReq) {
|
---|
| 2050 | ssize_t cbRet;
|
---|
[77951] | 2051 | int vrc = VbglR0SfHostReqWriteContig(pSuperInfo->map.root, pReq, sf_r->handle, pos,
|
---|
[77959] | 2052 | (uint32_t)size, pvBounce, virt_to_phys(pvBounce));
|
---|
| 2053 | sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
|
---|
[77526] | 2054 | if (RT_SUCCESS(vrc)) {
|
---|
| 2055 | cbRet = pReq->Parms.cb32Write.u.value32;
|
---|
| 2056 | AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
|
---|
[77770] | 2057 | vbsf_reg_write_sync_page_cache(mapping, pos, (uint32_t)cbRet, (uint8_t const *)pvBounce,
|
---|
[77853] | 2058 | NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/);
|
---|
[77526] | 2059 | pos += cbRet;
|
---|
| 2060 | *off = pos;
|
---|
| 2061 | if (pos > i_size_read(inode))
|
---|
| 2062 | i_size_write(inode, pos);
|
---|
| 2063 | } else
|
---|
| 2064 | cbRet = -EPROTO;
|
---|
| 2065 | sf_i->force_restat = 1; /* mtime (and size) may have changed */
|
---|
| 2066 | VbglR0PhysHeapFree(pReq);
|
---|
| 2067 | kfree(pvBounce);
|
---|
[77966] | 2068 | SFLOGFLOW(("vbsf_reg_write: returns %zd (%#zx), *off=%RX64 [bounce]\n", cbRet, cbRet, *off));
|
---|
[77526] | 2069 | return cbRet;
|
---|
| 2070 | }
|
---|
| 2071 | kfree(pvBounce);
|
---|
| 2072 | } else {
|
---|
| 2073 | kfree(pvBounce);
|
---|
[77966] | 2074 | SFLOGFLOW(("vbsf_reg_write: returns -EFAULT, *off=%RX64 [bounce]\n", *off));
|
---|
[77526] | 2075 | return -EFAULT;
|
---|
| 2076 | }
|
---|
| 2077 | }
|
---|
| 2078 | }
|
---|
[88571] | 2079 | # endif
|
---|
[77141] | 2080 |
|
---|
[77951] | 2081 | return vbsf_reg_write_locking(file, buf, size, off, pos, inode, sf_i, pSuperInfo, sf_r);
|
---|
[2614] | 2082 | }
|
---|
[88716] | 2083 |
|
---|
[88571] | 2084 | #endif /* < 5.10.0 */
|
---|
[85698] | 2085 | #if RTLNX_VER_MIN(2,6,19)
|
---|
[77708] | 2086 |
|
---|
[30175] | 2087 | /**
|
---|
[77626] | 2088 | * Companion to vbsf_iter_lock_pages().
|
---|
| 2089 | */
|
---|
| 2090 | DECLINLINE(void) vbsf_iter_unlock_pages(struct iov_iter *iter, struct page **papPages, size_t cPages, bool fSetDirty)
|
---|
| 2091 | {
|
---|
[83049] | 2092 | /* We don't mark kernel pages dirty (KVECs, BVECs, PIPEs): */
|
---|
| 2093 | if (!iter_is_iovec(iter))
|
---|
[77626] | 2094 | fSetDirty = false;
|
---|
| 2095 |
|
---|
| 2096 | while (cPages-- > 0)
|
---|
| 2097 | {
|
---|
| 2098 | struct page *pPage = papPages[cPages];
|
---|
| 2099 | if (fSetDirty && !PageReserved(pPage))
|
---|
[77967] | 2100 | set_page_dirty(pPage);
|
---|
[77626] | 2101 | vbsf_put_page(pPage);
|
---|
| 2102 | }
|
---|
| 2103 | }
|
---|
| 2104 |
|
---|
| 2105 |
|
---|
| 2106 | /**
|
---|
| 2107 | * Locks up to @a cMaxPages from the I/O vector iterator, advancing the
|
---|
| 2108 | * iterator.
|
---|
| 2109 | *
|
---|
| 2110 | * @returns 0 on success, negative errno value on failure.
|
---|
| 2111 | * @param iter The iterator to lock pages from.
|
---|
| 2112 | * @param fWrite Whether to write (true) or read (false) lock the pages.
|
---|
| 2113 | * @param pStash Where we stash peek results.
|
---|
| 2114 | * @param cMaxPages The maximum number of pages to get.
|
---|
| 2115 | * @param papPages Where to return the locked pages.
|
---|
| 2116 | * @param pcPages Where to return the number of pages.
|
---|
| 2117 | * @param poffPage0 Where to return the offset into the first page.
|
---|
| 2118 | * @param pcbChunk Where to return the number of bytes covered.
|
---|
| 2119 | */
|
---|
| 2120 | static int vbsf_iter_lock_pages(struct iov_iter *iter, bool fWrite, struct vbsf_iter_stash *pStash, size_t cMaxPages,
|
---|
| 2121 | struct page **papPages, size_t *pcPages, size_t *poffPage0, size_t *pcbChunk)
|
---|
| 2122 | {
|
---|
| 2123 | size_t cbChunk = 0;
|
---|
| 2124 | size_t cPages = 0;
|
---|
| 2125 | size_t offPage0 = 0;
|
---|
| 2126 | int rc = 0;
|
---|
| 2127 |
|
---|
[77628] | 2128 | Assert(iov_iter_count(iter) + pStash->cb > 0);
|
---|
[77626] | 2129 | if (!(iter->type & ITER_KVEC)) {
|
---|
| 2130 | /*
|
---|
| 2131 | * Do we have a stashed page?
|
---|
| 2132 | */
|
---|
| 2133 | if (pStash->pPage) {
|
---|
| 2134 | papPages[0] = pStash->pPage;
|
---|
| 2135 | offPage0 = pStash->off;
|
---|
| 2136 | cbChunk = pStash->cb;
|
---|
| 2137 | cPages = 1;
|
---|
| 2138 | pStash->pPage = NULL;
|
---|
| 2139 | pStash->off = 0;
|
---|
| 2140 | pStash->cb = 0;
|
---|
| 2141 | if ( offPage0 + cbChunk < PAGE_SIZE
|
---|
| 2142 | || iov_iter_count(iter) == 0) {
|
---|
| 2143 | *poffPage0 = offPage0;
|
---|
| 2144 | *pcbChunk = cbChunk;
|
---|
| 2145 | *pcPages = cPages;
|
---|
[77628] | 2146 | SFLOGFLOW(("vbsf_iter_lock_pages: returns %d - cPages=%#zx offPage0=%#zx cbChunk=%zx (stashed)\n",
|
---|
| 2147 | rc, cPages, offPage0, cbChunk));
|
---|
[77626] | 2148 | return 0;
|
---|
| 2149 | }
|
---|
| 2150 | cMaxPages -= 1;
|
---|
[77628] | 2151 | SFLOG3(("vbsf_iter_lock_pages: Picked up stashed page: %#zx LB %#zx\n", offPage0, cbChunk));
|
---|
[77626] | 2152 | } else {
|
---|
[85698] | 2153 | # if RTLNX_VER_MAX(4,11,0)
|
---|
[77626] | 2154 | /*
|
---|
| 2155 | * Copy out our starting point to assist rewinding.
|
---|
| 2156 | */
|
---|
| 2157 | pStash->offFromEnd = iov_iter_count(iter);
|
---|
| 2158 | pStash->Copy = *iter;
|
---|
| 2159 | # endif
|
---|
| 2160 | }
|
---|
| 2161 |
|
---|
| 2162 | /*
|
---|
| 2163 | * Get pages segment by segment.
|
---|
| 2164 | */
|
---|
| 2165 | do {
|
---|
| 2166 | /*
|
---|
| 2167 | * Make a special case of the first time thru here, since that's
|
---|
| 2168 | * the most typical scenario.
|
---|
| 2169 | */
|
---|
| 2170 | ssize_t cbSegRet;
|
---|
| 2171 | if (cPages == 0) {
|
---|
[85698] | 2172 | # if RTLNX_VER_MAX(3,19,0)
|
---|
[77710] | 2173 | while (!iov_iter_single_seg_count(iter)) /* Old code didn't skip empty segments which caused EFAULTs. */
|
---|
| 2174 | iov_iter_advance(iter, 0);
|
---|
| 2175 | # endif
|
---|
[77626] | 2176 | cbSegRet = iov_iter_get_pages(iter, papPages, iov_iter_count(iter), cMaxPages, &offPage0);
|
---|
| 2177 | if (cbSegRet > 0) {
|
---|
| 2178 | iov_iter_advance(iter, cbSegRet);
|
---|
| 2179 | cbChunk = (size_t)cbSegRet;
|
---|
| 2180 | cPages = RT_ALIGN_Z(offPage0 + cbSegRet, PAGE_SIZE) >> PAGE_SHIFT;
|
---|
| 2181 | cMaxPages -= cPages;
|
---|
[77628] | 2182 | SFLOG3(("vbsf_iter_lock_pages: iov_iter_get_pages -> %#zx @ %#zx; %#zx pages [first]\n", cbSegRet, offPage0, cPages));
|
---|
[77626] | 2183 | if ( cMaxPages == 0
|
---|
| 2184 | || ((offPage0 + (size_t)cbSegRet) & PAGE_OFFSET_MASK))
|
---|
| 2185 | break;
|
---|
| 2186 | } else {
|
---|
| 2187 | AssertStmt(cbSegRet < 0, cbSegRet = -EFAULT);
|
---|
| 2188 | rc = (int)cbSegRet;
|
---|
| 2189 | break;
|
---|
| 2190 | }
|
---|
| 2191 | } else {
|
---|
| 2192 | /*
|
---|
| 2193 | * Probe first page of new segment to check that we've got a zero offset and
|
---|
| 2194 | * can continue on the current chunk. Stash the page if the offset isn't zero.
|
---|
| 2195 | */
|
---|
| 2196 | size_t offPgProbe;
|
---|
| 2197 | size_t cbSeg = iov_iter_single_seg_count(iter);
|
---|
| 2198 | while (!cbSeg) {
|
---|
| 2199 | iov_iter_advance(iter, 0);
|
---|
| 2200 | cbSeg = iov_iter_single_seg_count(iter);
|
---|
| 2201 | }
|
---|
| 2202 | cbSegRet = iov_iter_get_pages(iter, &papPages[cPages], iov_iter_count(iter), 1, &offPgProbe);
|
---|
| 2203 | if (cbSegRet > 0) {
|
---|
| 2204 | iov_iter_advance(iter, cbSegRet); /** @todo maybe not do this if we stash the page? */
|
---|
[77628] | 2205 | Assert(offPgProbe + cbSegRet <= PAGE_SIZE);
|
---|
[77626] | 2206 | if (offPgProbe == 0) {
|
---|
| 2207 | cbChunk += cbSegRet;
|
---|
| 2208 | cPages += 1;
|
---|
| 2209 | cMaxPages -= 1;
|
---|
[77628] | 2210 | SFLOG3(("vbsf_iter_lock_pages: iov_iter_get_pages(1) -> %#zx @ %#zx\n", cbSegRet, offPgProbe));
|
---|
[77626] | 2211 | if ( cMaxPages == 0
|
---|
| 2212 | || cbSegRet != PAGE_SIZE)
|
---|
| 2213 | break;
|
---|
| 2214 |
|
---|
| 2215 | /*
|
---|
| 2216 | * Get the rest of the segment (if anything remaining).
|
---|
| 2217 | */
|
---|
| 2218 | cbSeg -= cbSegRet;
|
---|
| 2219 | if (cbSeg > 0) {
|
---|
| 2220 | cbSegRet = iov_iter_get_pages(iter, &papPages[cPages], iov_iter_count(iter), cMaxPages, &offPgProbe);
|
---|
| 2221 | if (cbSegRet > 0) {
|
---|
[77628] | 2222 | size_t const cPgRet = RT_ALIGN_Z((size_t)cbSegRet, PAGE_SIZE) >> PAGE_SHIFT;
|
---|
[77626] | 2223 | Assert(offPgProbe == 0);
|
---|
| 2224 | iov_iter_advance(iter, cbSegRet);
|
---|
[77628] | 2225 | SFLOG3(("vbsf_iter_lock_pages: iov_iter_get_pages() -> %#zx; %#zx pages\n", cbSegRet, cPgRet));
|
---|
| 2226 | cPages += cPgRet;
|
---|
| 2227 | cMaxPages -= cPgRet;
|
---|
[77626] | 2228 | cbChunk += cbSegRet;
|
---|
| 2229 | if ( cMaxPages == 0
|
---|
| 2230 | || ((size_t)cbSegRet & PAGE_OFFSET_MASK))
|
---|
| 2231 | break;
|
---|
| 2232 | } else {
|
---|
| 2233 | AssertStmt(cbSegRet < 0, cbSegRet = -EFAULT);
|
---|
| 2234 | rc = (int)cbSegRet;
|
---|
| 2235 | break;
|
---|
| 2236 | }
|
---|
| 2237 | }
|
---|
| 2238 | } else {
|
---|
| 2239 | /* The segment didn't start at a page boundrary, so stash it for
|
---|
| 2240 | the next round: */
|
---|
[77628] | 2241 | SFLOGFLOW(("vbsf_iter_lock_pages: iov_iter_get_pages(1) -> %#zx @ %#zx; stashed\n", cbSegRet, offPgProbe));
|
---|
[77626] | 2242 | Assert(papPages[cPages]);
|
---|
| 2243 | pStash->pPage = papPages[cPages];
|
---|
[77628] | 2244 | pStash->off = offPgProbe;
|
---|
[77626] | 2245 | pStash->cb = cbSegRet;
|
---|
| 2246 | break;
|
---|
| 2247 | }
|
---|
| 2248 | } else {
|
---|
| 2249 | AssertStmt(cbSegRet < 0, cbSegRet = -EFAULT);
|
---|
| 2250 | rc = (int)cbSegRet;
|
---|
| 2251 | break;
|
---|
| 2252 | }
|
---|
| 2253 | }
|
---|
| 2254 | Assert(cMaxPages > 0);
|
---|
| 2255 | } while (iov_iter_count(iter) > 0);
|
---|
| 2256 |
|
---|
| 2257 | } else {
|
---|
| 2258 | /*
|
---|
| 2259 | * The silly iov_iter_get_pages_alloc() function doesn't handle KVECs,
|
---|
| 2260 | * so everyone needs to do that by themselves.
|
---|
| 2261 | *
|
---|
| 2262 | * Note! Fixes here may apply to rtR0MemObjNativeLockKernel()
|
---|
| 2263 | * and vbsf_lock_user_pages_failed_check_kernel() as well.
|
---|
| 2264 | */
|
---|
[85698] | 2265 | # if RTLNX_VER_MAX(4,11,0)
|
---|
[77626] | 2266 | pStash->offFromEnd = iov_iter_count(iter);
|
---|
| 2267 | pStash->Copy = *iter;
|
---|
| 2268 | # endif
|
---|
| 2269 | do {
|
---|
| 2270 | uint8_t *pbBuf;
|
---|
| 2271 | size_t offStart;
|
---|
| 2272 | size_t cPgSeg;
|
---|
| 2273 |
|
---|
| 2274 | size_t cbSeg = iov_iter_single_seg_count(iter);
|
---|
| 2275 | while (!cbSeg) {
|
---|
| 2276 | iov_iter_advance(iter, 0);
|
---|
| 2277 | cbSeg = iov_iter_single_seg_count(iter);
|
---|
| 2278 | }
|
---|
| 2279 |
|
---|
[85698] | 2280 | # if RTLNX_VER_MIN(3,19,0)
|
---|
[77626] | 2281 | pbBuf = iter->kvec->iov_base + iter->iov_offset;
|
---|
| 2282 | # else
|
---|
| 2283 | pbBuf = iter->iov->iov_base + iter->iov_offset;
|
---|
| 2284 | # endif
|
---|
| 2285 | offStart = (uintptr_t)pbBuf & PAGE_OFFSET_MASK;
|
---|
| 2286 | if (!cPages)
|
---|
| 2287 | offPage0 = offStart;
|
---|
| 2288 | else if (offStart)
|
---|
| 2289 | break;
|
---|
| 2290 |
|
---|
| 2291 | cPgSeg = RT_ALIGN_Z(cbSeg, PAGE_SIZE) >> PAGE_SHIFT;
|
---|
| 2292 | if (cPgSeg > cMaxPages) {
|
---|
| 2293 | cPgSeg = cMaxPages;
|
---|
| 2294 | cbSeg = (cPgSeg << PAGE_SHIFT) - offStart;
|
---|
| 2295 | }
|
---|
| 2296 |
|
---|
| 2297 | rc = vbsf_lock_kernel_pages(pbBuf, fWrite, cPgSeg, &papPages[cPages]);
|
---|
| 2298 | if (rc == 0) {
|
---|
| 2299 | iov_iter_advance(iter, cbSeg);
|
---|
| 2300 | cbChunk += cbSeg;
|
---|
| 2301 | cPages += cPgSeg;
|
---|
| 2302 | cMaxPages -= cPgSeg;
|
---|
| 2303 | if ( cMaxPages == 0
|
---|
| 2304 | || ((offStart + cbSeg) & PAGE_OFFSET_MASK) != 0)
|
---|
| 2305 | break;
|
---|
| 2306 | } else
|
---|
| 2307 | break;
|
---|
| 2308 | } while (iov_iter_count(iter) > 0);
|
---|
| 2309 | }
|
---|
| 2310 |
|
---|
| 2311 | /*
|
---|
| 2312 | * Clean up if we failed; set return values.
|
---|
| 2313 | */
|
---|
| 2314 | if (rc == 0) {
|
---|
| 2315 | /* likely */
|
---|
| 2316 | } else {
|
---|
| 2317 | if (cPages > 0)
|
---|
| 2318 | vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/);
|
---|
| 2319 | offPage0 = cbChunk = cPages = 0;
|
---|
| 2320 | }
|
---|
| 2321 | *poffPage0 = offPage0;
|
---|
| 2322 | *pcbChunk = cbChunk;
|
---|
| 2323 | *pcPages = cPages;
|
---|
[77628] | 2324 | SFLOGFLOW(("vbsf_iter_lock_pages: returns %d - cPages=%#zx offPage0=%#zx cbChunk=%zx\n", rc, cPages, offPage0, cbChunk));
|
---|
[77626] | 2325 | return rc;
|
---|
| 2326 | }
|
---|
| 2327 |
|
---|
| 2328 |
|
---|
| 2329 | /**
|
---|
| 2330 | * Rewinds the I/O vector.
|
---|
| 2331 | */
|
---|
| 2332 | static bool vbsf_iter_rewind(struct iov_iter *iter, struct vbsf_iter_stash *pStash, size_t cbToRewind, size_t cbChunk)
|
---|
| 2333 | {
|
---|
| 2334 | size_t cbExtra;
|
---|
| 2335 | if (!pStash->pPage) {
|
---|
| 2336 | cbExtra = 0;
|
---|
| 2337 | } else {
|
---|
| 2338 | cbExtra = pStash->cb;
|
---|
| 2339 | vbsf_put_page(pStash->pPage);
|
---|
| 2340 | pStash->pPage = NULL;
|
---|
| 2341 | pStash->cb = 0;
|
---|
| 2342 | pStash->off = 0;
|
---|
| 2343 | }
|
---|
| 2344 |
|
---|
[85698] | 2345 | # if RTLNX_VER_MIN(4,11,0) || RTLNX_VER_MAX(3,16,0)
|
---|
[77626] | 2346 | iov_iter_revert(iter, cbToRewind + cbExtra);
|
---|
| 2347 | return true;
|
---|
| 2348 | # else
|
---|
| 2349 | /** @todo impl this */
|
---|
| 2350 | return false;
|
---|
| 2351 | # endif
|
---|
| 2352 | }
|
---|
| 2353 |
|
---|
| 2354 |
|
---|
| 2355 | /**
|
---|
| 2356 | * Cleans up the page locking stash.
|
---|
| 2357 | */
|
---|
| 2358 | DECLINLINE(void) vbsf_iter_cleanup_stash(struct iov_iter *iter, struct vbsf_iter_stash *pStash)
|
---|
| 2359 | {
|
---|
| 2360 | if (pStash->pPage)
|
---|
| 2361 | vbsf_iter_rewind(iter, pStash, 0, 0);
|
---|
| 2362 | }
|
---|
| 2363 |
|
---|
| 2364 |
|
---|
| 2365 | /**
|
---|
| 2366 | * Calculates the longest span of pages we could transfer to the host in a
|
---|
| 2367 | * single request.
|
---|
| 2368 | *
|
---|
| 2369 | * @returns Page count, non-zero.
|
---|
| 2370 | * @param iter The I/O vector iterator to inspect.
|
---|
| 2371 | */
|
---|
| 2372 | static size_t vbsf_iter_max_span_of_pages(struct iov_iter *iter)
|
---|
| 2373 | {
|
---|
| 2374 | size_t cPages;
|
---|
[85698] | 2375 | # if RTLNX_VER_MIN(3,16,0)
|
---|
[77628] | 2376 | if (iter_is_iovec(iter) || (iter->type & ITER_KVEC)) {
|
---|
[77873] | 2377 | #endif
|
---|
[77626] | 2378 | const struct iovec *pCurIov = iter->iov;
|
---|
| 2379 | size_t cLeft = iter->nr_segs;
|
---|
| 2380 | size_t cPagesSpan = 0;
|
---|
| 2381 |
|
---|
[77628] | 2382 | /* iovect and kvec are identical, except for the __user tagging of iov_base. */
|
---|
[77626] | 2383 | AssertCompileMembersSameSizeAndOffset(struct iovec, iov_base, struct kvec, iov_base);
|
---|
| 2384 | AssertCompileMembersSameSizeAndOffset(struct iovec, iov_len, struct kvec, iov_len);
|
---|
| 2385 | AssertCompile(sizeof(struct iovec) == sizeof(struct kvec));
|
---|
| 2386 |
|
---|
| 2387 | cPages = 1;
|
---|
| 2388 | AssertReturn(cLeft > 0, cPages);
|
---|
| 2389 |
|
---|
| 2390 | /* Special case: segment offset. */
|
---|
| 2391 | if (iter->iov_offset > 0) {
|
---|
| 2392 | if (iter->iov_offset < pCurIov->iov_len) {
|
---|
| 2393 | size_t const cbSegLeft = pCurIov->iov_len - iter->iov_offset;
|
---|
| 2394 | size_t const offPage0 = ((uintptr_t)pCurIov->iov_base + iter->iov_offset) & PAGE_OFFSET_MASK;
|
---|
| 2395 | cPages = cPagesSpan = RT_ALIGN_Z(offPage0 + cbSegLeft, PAGE_SIZE) >> PAGE_SHIFT;
|
---|
| 2396 | if ((offPage0 + cbSegLeft) & PAGE_OFFSET_MASK)
|
---|
| 2397 | cPagesSpan = 0;
|
---|
| 2398 | }
|
---|
[77628] | 2399 | SFLOGFLOW(("vbsf_iter: seg[0]= %p LB %#zx\n", pCurIov->iov_base, pCurIov->iov_len));
|
---|
[77626] | 2400 | pCurIov++;
|
---|
| 2401 | cLeft--;
|
---|
| 2402 | }
|
---|
| 2403 |
|
---|
| 2404 | /* Full segments. */
|
---|
| 2405 | while (cLeft-- > 0) {
|
---|
| 2406 | if (pCurIov->iov_len > 0) {
|
---|
| 2407 | size_t const offPage0 = (uintptr_t)pCurIov->iov_base & PAGE_OFFSET_MASK;
|
---|
| 2408 | if (offPage0 == 0) {
|
---|
| 2409 | if (!(pCurIov->iov_len & PAGE_OFFSET_MASK)) {
|
---|
| 2410 | cPagesSpan += pCurIov->iov_len >> PAGE_SHIFT;
|
---|
| 2411 | } else {
|
---|
| 2412 | cPagesSpan += RT_ALIGN_Z(pCurIov->iov_len, PAGE_SIZE) >> PAGE_SHIFT;
|
---|
| 2413 | if (cPagesSpan > cPages)
|
---|
| 2414 | cPages = cPagesSpan;
|
---|
| 2415 | cPagesSpan = 0;
|
---|
| 2416 | }
|
---|
| 2417 | } else {
|
---|
| 2418 | if (cPagesSpan > cPages)
|
---|
| 2419 | cPages = cPagesSpan;
|
---|
| 2420 | if (!((offPage0 + pCurIov->iov_len) & PAGE_OFFSET_MASK)) {
|
---|
| 2421 | cPagesSpan = pCurIov->iov_len >> PAGE_SHIFT;
|
---|
| 2422 | } else {
|
---|
| 2423 | cPagesSpan += RT_ALIGN_Z(offPage0 + pCurIov->iov_len, PAGE_SIZE) >> PAGE_SHIFT;
|
---|
| 2424 | if (cPagesSpan > cPages)
|
---|
| 2425 | cPages = cPagesSpan;
|
---|
| 2426 | cPagesSpan = 0;
|
---|
| 2427 | }
|
---|
| 2428 | }
|
---|
| 2429 | }
|
---|
[77628] | 2430 | SFLOGFLOW(("vbsf_iter: seg[%u]= %p LB %#zx\n", iter->nr_segs - cLeft, pCurIov->iov_base, pCurIov->iov_len));
|
---|
[77626] | 2431 | pCurIov++;
|
---|
| 2432 | }
|
---|
| 2433 | if (cPagesSpan > cPages)
|
---|
| 2434 | cPages = cPagesSpan;
|
---|
[85698] | 2435 | # if RTLNX_VER_MIN(3,16,0)
|
---|
[77873] | 2436 | } else {
|
---|
[77626] | 2437 | /* Won't bother with accurate counts for the next two types, just make
|
---|
| 2438 | some rough estimates (does pipes have segments?): */
|
---|
| 2439 | size_t cSegs = iter->type & ITER_BVEC ? RT_MAX(1, iter->nr_segs) : 1;
|
---|
| 2440 | cPages = (iov_iter_count(iter) + (PAGE_SIZE * 2 - 2) * cSegs) >> PAGE_SHIFT;
|
---|
| 2441 | }
|
---|
[77873] | 2442 | # endif
|
---|
[77628] | 2443 | SFLOGFLOW(("vbsf_iter_max_span_of_pages: returns %#zx\n", cPages));
|
---|
[77626] | 2444 | return cPages;
|
---|
| 2445 | }
|
---|
| 2446 |
|
---|
| 2447 |
|
---|
| 2448 | /**
|
---|
| 2449 | * Worker for vbsf_reg_read_iter() that deals with larger reads using page
|
---|
| 2450 | * locking.
|
---|
| 2451 | */
|
---|
| 2452 | static ssize_t vbsf_reg_read_iter_locking(struct kiocb *kio, struct iov_iter *iter, size_t cbToRead,
|
---|
[77951] | 2453 | struct vbsf_super_info *pSuperInfo, struct vbsf_reg_info *sf_r)
|
---|
[77626] | 2454 | {
|
---|
| 2455 | /*
|
---|
| 2456 | * Estimate how many pages we may possible submit in a single request so
|
---|
| 2457 | * that we can allocate matching request buffer and page array.
|
---|
| 2458 | */
|
---|
| 2459 | struct page *apPagesStack[16];
|
---|
| 2460 | struct page **papPages = &apPagesStack[0];
|
---|
| 2461 | struct page **papPagesFree = NULL;
|
---|
| 2462 | VBOXSFREADPGLSTREQ *pReq;
|
---|
| 2463 | ssize_t cbRet = 0;
|
---|
| 2464 | size_t cMaxPages = vbsf_iter_max_span_of_pages(iter);
|
---|
[77951] | 2465 | cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 2), cMaxPages);
|
---|
[77626] | 2466 |
|
---|
| 2467 | pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
|
---|
| 2468 | while (!pReq && cMaxPages > 4) {
|
---|
| 2469 | cMaxPages /= 2;
|
---|
| 2470 | pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
|
---|
| 2471 | }
|
---|
| 2472 | if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
|
---|
| 2473 | papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL);
|
---|
| 2474 | if (pReq && papPages) {
|
---|
| 2475 |
|
---|
| 2476 | /*
|
---|
| 2477 | * The read loop.
|
---|
| 2478 | */
|
---|
| 2479 | struct vbsf_iter_stash Stash = VBSF_ITER_STASH_INITIALIZER;
|
---|
| 2480 | do {
|
---|
| 2481 | /*
|
---|
| 2482 | * Grab as many pages as we can. This means that if adjacent
|
---|
| 2483 | * segments both starts and ends at a page boundrary, we can
|
---|
| 2484 | * do them both in the same transfer from the host.
|
---|
| 2485 | */
|
---|
| 2486 | size_t cPages = 0;
|
---|
| 2487 | size_t cbChunk = 0;
|
---|
| 2488 | size_t offPage0 = 0;
|
---|
| 2489 | int rc = vbsf_iter_lock_pages(iter, true /*fWrite*/, &Stash, cMaxPages, papPages, &cPages, &offPage0, &cbChunk);
|
---|
| 2490 | if (rc == 0) {
|
---|
| 2491 | size_t iPage = cPages;
|
---|
| 2492 | while (iPage-- > 0)
|
---|
| 2493 | pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
|
---|
| 2494 | pReq->PgLst.offFirstPage = (uint16_t)offPage0;
|
---|
| 2495 | AssertStmt(cbChunk <= cbToRead, cbChunk = cbToRead);
|
---|
| 2496 | } else {
|
---|
| 2497 | cbRet = rc;
|
---|
| 2498 | break;
|
---|
| 2499 | }
|
---|
| 2500 |
|
---|
| 2501 | /*
|
---|
| 2502 | * Issue the request and unlock the pages.
|
---|
| 2503 | */
|
---|
[77951] | 2504 | rc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, kio->ki_pos, cbChunk, cPages);
|
---|
[77626] | 2505 | SFLOGFLOW(("vbsf_reg_read_iter_locking: VbglR0SfHostReqReadPgLst -> %d (cbActual=%#x cbChunk=%#zx of %#zx cPages=%#zx offPage0=%#x\n",
|
---|
| 2506 | rc, pReq->Parms.cb32Read.u.value32, cbChunk, cbToRead, cPages, offPage0));
|
---|
| 2507 |
|
---|
| 2508 | vbsf_iter_unlock_pages(iter, papPages, cPages, true /*fSetDirty*/);
|
---|
| 2509 |
|
---|
| 2510 | if (RT_SUCCESS(rc)) {
|
---|
| 2511 | /*
|
---|
| 2512 | * Success, advance position and buffer.
|
---|
| 2513 | */
|
---|
| 2514 | uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
|
---|
| 2515 | AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
|
---|
| 2516 | cbRet += cbActual;
|
---|
| 2517 | kio->ki_pos += cbActual;
|
---|
| 2518 | cbToRead -= cbActual;
|
---|
| 2519 |
|
---|
| 2520 | /*
|
---|
| 2521 | * Are we done already?
|
---|
| 2522 | */
|
---|
| 2523 | if (!cbToRead)
|
---|
| 2524 | break;
|
---|
| 2525 | if (cbActual < cbChunk) { /* We ASSUME end-of-file here. */
|
---|
| 2526 | if (vbsf_iter_rewind(iter, &Stash, cbChunk - cbActual, cbActual))
|
---|
| 2527 | iov_iter_truncate(iter, 0);
|
---|
| 2528 | break;
|
---|
| 2529 | }
|
---|
| 2530 | } else {
|
---|
| 2531 | /*
|
---|
| 2532 | * Try rewind the iter structure.
|
---|
| 2533 | */
|
---|
| 2534 | bool const fRewindOkay = vbsf_iter_rewind(iter, &Stash, cbChunk, cbChunk);
|
---|
| 2535 | if (rc == VERR_NO_MEMORY && cMaxPages > 4 && fRewindOkay) {
|
---|
| 2536 | /*
|
---|
| 2537 | * The host probably doesn't have enough heap to handle the
|
---|
| 2538 | * request, reduce the page count and retry.
|
---|
| 2539 | */
|
---|
| 2540 | cMaxPages /= 4;
|
---|
| 2541 | Assert(cMaxPages > 0);
|
---|
| 2542 | } else {
|
---|
| 2543 | /*
|
---|
| 2544 | * If we've successfully read stuff, return it rather than
|
---|
| 2545 | * the error. (Not sure if this is such a great idea...)
|
---|
| 2546 | */
|
---|
| 2547 | if (cbRet <= 0)
|
---|
| 2548 | cbRet = -EPROTO;
|
---|
| 2549 | break;
|
---|
| 2550 | }
|
---|
| 2551 | }
|
---|
| 2552 | } while (cbToRead > 0);
|
---|
| 2553 |
|
---|
| 2554 | vbsf_iter_cleanup_stash(iter, &Stash);
|
---|
| 2555 | }
|
---|
| 2556 | else
|
---|
| 2557 | cbRet = -ENOMEM;
|
---|
| 2558 | if (papPagesFree)
|
---|
| 2559 | kfree(papPages);
|
---|
| 2560 | if (pReq)
|
---|
| 2561 | VbglR0PhysHeapFree(pReq);
|
---|
| 2562 | SFLOGFLOW(("vbsf_reg_read_iter_locking: returns %#zx (%zd)\n", cbRet, cbRet));
|
---|
| 2563 | return cbRet;
|
---|
| 2564 | }
|
---|
| 2565 |
|
---|
| 2566 |
|
---|
| 2567 | /**
|
---|
| 2568 | * Read into I/O vector iterator.
|
---|
| 2569 | *
|
---|
| 2570 | * @returns Number of bytes read on success, negative errno on error.
|
---|
| 2571 | * @param kio The kernel I/O control block (or something like that).
|
---|
| 2572 | * @param iter The I/O vector iterator describing the buffer.
|
---|
| 2573 | */
|
---|
[85698] | 2574 | # if RTLNX_VER_MIN(3,16,0)
|
---|
[77626] | 2575 | static ssize_t vbsf_reg_read_iter(struct kiocb *kio, struct iov_iter *iter)
|
---|
[77873] | 2576 | # else
|
---|
| 2577 | static ssize_t vbsf_reg_aio_read(struct kiocb *kio, const struct iovec *iov, unsigned long cSegs, loff_t offFile)
|
---|
| 2578 | # endif
|
---|
[77626] | 2579 | {
|
---|
[85698] | 2580 | # if RTLNX_VER_MAX(3,16,0)
|
---|
[77951] | 2581 | struct vbsf_iov_iter fake_iter = VBSF_IOV_ITER_INITIALIZER(cSegs, iov, 0 /*write*/);
|
---|
| 2582 | struct vbsf_iov_iter *iter = &fake_iter;
|
---|
[77873] | 2583 | # endif
|
---|
[77951] | 2584 | size_t cbToRead = iov_iter_count(iter);
|
---|
| 2585 | struct inode *inode = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
|
---|
| 2586 | struct address_space *mapping = inode->i_mapping;
|
---|
[77626] | 2587 |
|
---|
[77951] | 2588 | struct vbsf_reg_info *sf_r = kio->ki_filp->private_data;
|
---|
| 2589 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
[77626] | 2590 |
|
---|
| 2591 | SFLOGFLOW(("vbsf_reg_read_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n",
|
---|
| 2592 | inode, kio->ki_filp, cbToRead, kio->ki_pos, iter->type));
|
---|
| 2593 | AssertReturn(S_ISREG(inode->i_mode), -EINVAL);
|
---|
| 2594 |
|
---|
| 2595 | /*
|
---|
| 2596 | * Do we have anything at all to do here?
|
---|
| 2597 | */
|
---|
| 2598 | if (!cbToRead)
|
---|
| 2599 | return 0;
|
---|
| 2600 |
|
---|
| 2601 | /*
|
---|
| 2602 | * If there is a mapping and O_DIRECT isn't in effect, we must at a
|
---|
| 2603 | * heed dirty pages in the mapping and read from them. For simplicity
|
---|
| 2604 | * though, we just do page cache reading when there are writable
|
---|
| 2605 | * mappings around with any kind of pages loaded.
|
---|
| 2606 | */
|
---|
[77951] | 2607 | if (vbsf_should_use_cached_read(kio->ki_filp, mapping, pSuperInfo)) {
|
---|
[85698] | 2608 | # if RTLNX_VER_MIN(3,16,0)
|
---|
[77626] | 2609 | return generic_file_read_iter(kio, iter);
|
---|
[77873] | 2610 | # else
|
---|
| 2611 | return generic_file_aio_read(kio, iov, cSegs, offFile);
|
---|
| 2612 | # endif
|
---|
| 2613 | }
|
---|
[77626] | 2614 |
|
---|
| 2615 | /*
|
---|
| 2616 | * Now now we reject async I/O requests.
|
---|
| 2617 | */
|
---|
| 2618 | if (!is_sync_kiocb(kio)) {
|
---|
| 2619 | SFLOGFLOW(("vbsf_reg_read_iter: async I/O not yet supported\n")); /** @todo extend FsPerf with AIO tests. */
|
---|
| 2620 | return -EOPNOTSUPP;
|
---|
| 2621 | }
|
---|
| 2622 |
|
---|
| 2623 | /*
|
---|
| 2624 | * For small requests, try use an embedded buffer provided we get a heap block
|
---|
| 2625 | * that does not cross page boundraries (see host code).
|
---|
| 2626 | */
|
---|
| 2627 | if (cbToRead <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) /* see allocator */) {
|
---|
| 2628 | uint32_t const cbReq = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) + cbToRead;
|
---|
| 2629 | VBOXSFREADEMBEDDEDREQ *pReq = (VBOXSFREADEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
|
---|
| 2630 | if (pReq) {
|
---|
| 2631 | if ((PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
|
---|
| 2632 | ssize_t cbRet;
|
---|
[77951] | 2633 | int vrc = VbglR0SfHostReqReadEmbedded(pSuperInfo->map.root, pReq, sf_r->Handle.hHost,
|
---|
| 2634 | kio->ki_pos, (uint32_t)cbToRead);
|
---|
[77626] | 2635 | if (RT_SUCCESS(vrc)) {
|
---|
| 2636 | cbRet = pReq->Parms.cb32Read.u.value32;
|
---|
| 2637 | AssertStmt(cbRet <= (ssize_t)cbToRead, cbRet = cbToRead);
|
---|
| 2638 | if (copy_to_iter(pReq->abData, cbRet, iter) == cbRet) {
|
---|
| 2639 | kio->ki_pos += cbRet;
|
---|
| 2640 | if (cbRet < cbToRead)
|
---|
| 2641 | iov_iter_truncate(iter, 0);
|
---|
| 2642 | } else
|
---|
| 2643 | cbRet = -EFAULT;
|
---|
| 2644 | } else
|
---|
| 2645 | cbRet = -EPROTO;
|
---|
| 2646 | VbglR0PhysHeapFree(pReq);
|
---|
| 2647 | SFLOGFLOW(("vbsf_reg_read_iter: returns %#zx (%zd)\n", cbRet, cbRet));
|
---|
| 2648 | return cbRet;
|
---|
| 2649 | }
|
---|
| 2650 | VbglR0PhysHeapFree(pReq);
|
---|
| 2651 | }
|
---|
| 2652 | }
|
---|
| 2653 |
|
---|
| 2654 | /*
|
---|
| 2655 | * Otherwise do the page locking thing.
|
---|
| 2656 | */
|
---|
[77951] | 2657 | return vbsf_reg_read_iter_locking(kio, iter, cbToRead, pSuperInfo, sf_r);
|
---|
[77626] | 2658 | }
|
---|
| 2659 |
|
---|
| 2660 |
|
---|
[77631] | 2661 | /**
|
---|
| 2662 | * Worker for vbsf_reg_write_iter() that deals with larger writes using page
|
---|
| 2663 | * locking.
|
---|
| 2664 | */
|
---|
| 2665 | static ssize_t vbsf_reg_write_iter_locking(struct kiocb *kio, struct iov_iter *iter, size_t cbToWrite, loff_t offFile,
|
---|
[77976] | 2666 | struct vbsf_super_info *pSuperInfo, struct vbsf_reg_info *sf_r, struct inode *inode,
|
---|
| 2667 | struct vbsf_inode_info *sf_i, struct address_space *mapping, bool fAppend)
|
---|
[77631] | 2668 | {
|
---|
| 2669 | /*
|
---|
| 2670 | * Estimate how many pages we may possible submit in a single request so
|
---|
| 2671 | * that we can allocate matching request buffer and page array.
|
---|
| 2672 | */
|
---|
| 2673 | struct page *apPagesStack[16];
|
---|
| 2674 | struct page **papPages = &apPagesStack[0];
|
---|
| 2675 | struct page **papPagesFree = NULL;
|
---|
| 2676 | VBOXSFWRITEPGLSTREQ *pReq;
|
---|
| 2677 | ssize_t cbRet = 0;
|
---|
| 2678 | size_t cMaxPages = vbsf_iter_max_span_of_pages(iter);
|
---|
[77951] | 2679 | cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 2), cMaxPages);
|
---|
[77631] | 2680 |
|
---|
| 2681 | pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
|
---|
| 2682 | while (!pReq && cMaxPages > 4) {
|
---|
| 2683 | cMaxPages /= 2;
|
---|
| 2684 | pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
|
---|
| 2685 | }
|
---|
| 2686 | if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
|
---|
| 2687 | papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL);
|
---|
| 2688 | if (pReq && papPages) {
|
---|
| 2689 |
|
---|
| 2690 | /*
|
---|
| 2691 | * The write loop.
|
---|
| 2692 | */
|
---|
| 2693 | struct vbsf_iter_stash Stash = VBSF_ITER_STASH_INITIALIZER;
|
---|
| 2694 | do {
|
---|
| 2695 | /*
|
---|
| 2696 | * Grab as many pages as we can. This means that if adjacent
|
---|
| 2697 | * segments both starts and ends at a page boundrary, we can
|
---|
| 2698 | * do them both in the same transfer from the host.
|
---|
| 2699 | */
|
---|
| 2700 | size_t cPages = 0;
|
---|
| 2701 | size_t cbChunk = 0;
|
---|
| 2702 | size_t offPage0 = 0;
|
---|
| 2703 | int rc = vbsf_iter_lock_pages(iter, false /*fWrite*/, &Stash, cMaxPages, papPages, &cPages, &offPage0, &cbChunk);
|
---|
| 2704 | if (rc == 0) {
|
---|
| 2705 | size_t iPage = cPages;
|
---|
| 2706 | while (iPage-- > 0)
|
---|
| 2707 | pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
|
---|
| 2708 | pReq->PgLst.offFirstPage = (uint16_t)offPage0;
|
---|
| 2709 | AssertStmt(cbChunk <= cbToWrite, cbChunk = cbToWrite);
|
---|
| 2710 | } else {
|
---|
| 2711 | cbRet = rc;
|
---|
| 2712 | break;
|
---|
| 2713 | }
|
---|
| 2714 |
|
---|
| 2715 | /*
|
---|
| 2716 | * Issue the request and unlock the pages.
|
---|
| 2717 | */
|
---|
[77951] | 2718 | rc = VbglR0SfHostReqWritePgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
|
---|
[77959] | 2719 | sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
|
---|
[77631] | 2720 | SFLOGFLOW(("vbsf_reg_write_iter_locking: VbglR0SfHostReqWritePgLst -> %d (cbActual=%#x cbChunk=%#zx of %#zx cPages=%#zx offPage0=%#x\n",
|
---|
| 2721 | rc, pReq->Parms.cb32Write.u.value32, cbChunk, cbToWrite, cPages, offPage0));
|
---|
| 2722 | if (RT_SUCCESS(rc)) {
|
---|
| 2723 | /*
|
---|
| 2724 | * Success, advance position and buffer.
|
---|
| 2725 | */
|
---|
| 2726 | uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
|
---|
| 2727 | AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
|
---|
[77770] | 2728 |
|
---|
[77853] | 2729 | vbsf_reg_write_sync_page_cache(mapping, offFile, cbActual, NULL /*pbSrcBuf*/, papPages, offPage0, cPages);
|
---|
[77770] | 2730 | vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/);
|
---|
| 2731 |
|
---|
[77631] | 2732 | cbRet += cbActual;
|
---|
[77976] | 2733 | cbToWrite -= cbActual;
|
---|
| 2734 |
|
---|
[77631] | 2735 | offFile += cbActual;
|
---|
[77976] | 2736 | if (fAppend && (g_fSfFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET))
|
---|
| 2737 | offFile = pReq->Parms.off64Write.u.value64;
|
---|
[77631] | 2738 | kio->ki_pos = offFile;
|
---|
| 2739 | if (offFile > i_size_read(inode))
|
---|
| 2740 | i_size_write(inode, offFile);
|
---|
[77976] | 2741 |
|
---|
[77631] | 2742 | sf_i->force_restat = 1; /* mtime (and size) may have changed */
|
---|
| 2743 |
|
---|
| 2744 | /*
|
---|
| 2745 | * Are we done already?
|
---|
| 2746 | */
|
---|
| 2747 | if (!cbToWrite)
|
---|
| 2748 | break;
|
---|
| 2749 | if (cbActual < cbChunk) { /* We ASSUME end-of-file here. */
|
---|
| 2750 | if (vbsf_iter_rewind(iter, &Stash, cbChunk - cbActual, cbActual))
|
---|
| 2751 | iov_iter_truncate(iter, 0);
|
---|
| 2752 | break;
|
---|
| 2753 | }
|
---|
| 2754 | } else {
|
---|
| 2755 | /*
|
---|
| 2756 | * Try rewind the iter structure.
|
---|
| 2757 | */
|
---|
[77770] | 2758 | bool fRewindOkay;
|
---|
| 2759 | vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/);
|
---|
| 2760 | fRewindOkay = vbsf_iter_rewind(iter, &Stash, cbChunk, cbChunk);
|
---|
[77631] | 2761 | if (rc == VERR_NO_MEMORY && cMaxPages > 4 && fRewindOkay) {
|
---|
| 2762 | /*
|
---|
| 2763 | * The host probably doesn't have enough heap to handle the
|
---|
| 2764 | * request, reduce the page count and retry.
|
---|
| 2765 | */
|
---|
| 2766 | cMaxPages /= 4;
|
---|
| 2767 | Assert(cMaxPages > 0);
|
---|
| 2768 | } else {
|
---|
| 2769 | /*
|
---|
| 2770 | * If we've successfully written stuff, return it rather than
|
---|
| 2771 | * the error. (Not sure if this is such a great idea...)
|
---|
| 2772 | */
|
---|
| 2773 | if (cbRet <= 0)
|
---|
| 2774 | cbRet = -EPROTO;
|
---|
| 2775 | break;
|
---|
| 2776 | }
|
---|
| 2777 | }
|
---|
| 2778 | } while (cbToWrite > 0);
|
---|
| 2779 |
|
---|
| 2780 | vbsf_iter_cleanup_stash(iter, &Stash);
|
---|
| 2781 | }
|
---|
| 2782 | else
|
---|
| 2783 | cbRet = -ENOMEM;
|
---|
| 2784 | if (papPagesFree)
|
---|
| 2785 | kfree(papPages);
|
---|
| 2786 | if (pReq)
|
---|
| 2787 | VbglR0PhysHeapFree(pReq);
|
---|
| 2788 | SFLOGFLOW(("vbsf_reg_write_iter_locking: returns %#zx (%zd)\n", cbRet, cbRet));
|
---|
| 2789 | return cbRet;
|
---|
| 2790 | }
|
---|
| 2791 |
|
---|
| 2792 |
|
---|
| 2793 | /**
|
---|
| 2794 | * Write from I/O vector iterator.
|
---|
| 2795 | *
|
---|
| 2796 | * @returns Number of bytes written on success, negative errno on error.
|
---|
| 2797 | * @param kio The kernel I/O control block (or something like that).
|
---|
| 2798 | * @param iter The I/O vector iterator describing the buffer.
|
---|
| 2799 | */
|
---|
[85698] | 2800 | # if RTLNX_VER_MIN(3,16,0)
|
---|
[77626] | 2801 | static ssize_t vbsf_reg_write_iter(struct kiocb *kio, struct iov_iter *iter)
|
---|
[77873] | 2802 | # else
|
---|
| 2803 | static ssize_t vbsf_reg_aio_write(struct kiocb *kio, const struct iovec *iov, unsigned long cSegs, loff_t offFile)
|
---|
| 2804 | # endif
|
---|
[77626] | 2805 | {
|
---|
[85698] | 2806 | # if RTLNX_VER_MAX(3,16,0)
|
---|
[77951] | 2807 | struct vbsf_iov_iter fake_iter = VBSF_IOV_ITER_INITIALIZER(cSegs, iov, 1 /*write*/);
|
---|
| 2808 | struct vbsf_iov_iter *iter = &fake_iter;
|
---|
[77873] | 2809 | # endif
|
---|
[77951] | 2810 | size_t cbToWrite = iov_iter_count(iter);
|
---|
| 2811 | struct inode *inode = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
|
---|
| 2812 | struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
|
---|
| 2813 | struct address_space *mapping = inode->i_mapping;
|
---|
[77631] | 2814 |
|
---|
[77951] | 2815 | struct vbsf_reg_info *sf_r = kio->ki_filp->private_data;
|
---|
| 2816 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
[85698] | 2817 | # if RTLNX_VER_MIN(3,16,0)
|
---|
[77951] | 2818 | loff_t offFile = kio->ki_pos;
|
---|
[77873] | 2819 | # endif
|
---|
[85698] | 2820 | # if RTLNX_VER_MIN(4,1,0)
|
---|
[77976] | 2821 | bool const fAppend = RT_BOOL(kio->ki_flags & IOCB_APPEND);
|
---|
| 2822 | # else
|
---|
| 2823 | bool const fAppend = RT_BOOL(kio->ki_filp->f_flags & O_APPEND);
|
---|
| 2824 | # endif
|
---|
[77631] | 2825 |
|
---|
[77976] | 2826 |
|
---|
[77631] | 2827 | SFLOGFLOW(("vbsf_reg_write_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n",
|
---|
| 2828 | inode, kio->ki_filp, cbToWrite, offFile, iter->type));
|
---|
| 2829 | AssertReturn(S_ISREG(inode->i_mode), -EINVAL);
|
---|
| 2830 |
|
---|
| 2831 | /*
|
---|
[77976] | 2832 | * Enforce APPEND flag (more later).
|
---|
[77631] | 2833 | */
|
---|
[77976] | 2834 | if (fAppend)
|
---|
[77631] | 2835 | kio->ki_pos = offFile = i_size_read(inode);
|
---|
| 2836 |
|
---|
| 2837 | /*
|
---|
| 2838 | * Do we have anything at all to do here?
|
---|
| 2839 | */
|
---|
| 2840 | if (!cbToWrite)
|
---|
| 2841 | return 0;
|
---|
| 2842 |
|
---|
[77953] | 2843 | /** @todo Implement the read-write caching mode. */
|
---|
| 2844 |
|
---|
[77631] | 2845 | /*
|
---|
| 2846 | * Now now we reject async I/O requests.
|
---|
| 2847 | */
|
---|
| 2848 | if (!is_sync_kiocb(kio)) {
|
---|
| 2849 | SFLOGFLOW(("vbsf_reg_write_iter: async I/O not yet supported\n")); /** @todo extend FsPerf with AIO tests. */
|
---|
| 2850 | return -EOPNOTSUPP;
|
---|
| 2851 | }
|
---|
| 2852 |
|
---|
| 2853 | /*
|
---|
| 2854 | * If there are active writable mappings, coordinate with any
|
---|
| 2855 | * pending writes via those.
|
---|
| 2856 | */
|
---|
| 2857 | if ( mapping
|
---|
| 2858 | && mapping->nrpages > 0
|
---|
| 2859 | && mapping_writably_mapped(mapping)) {
|
---|
[85698] | 2860 | # if RTLNX_VER_MIN(2,6,32)
|
---|
[77631] | 2861 | int err = filemap_fdatawait_range(mapping, offFile, offFile + cbToWrite - 1);
|
---|
| 2862 | if (err)
|
---|
| 2863 | return err;
|
---|
[77873] | 2864 | # else
|
---|
[77631] | 2865 | /** @todo ... */
|
---|
[77873] | 2866 | # endif
|
---|
[77631] | 2867 | }
|
---|
| 2868 |
|
---|
| 2869 | /*
|
---|
| 2870 | * For small requests, try use an embedded buffer provided we get a heap block
|
---|
| 2871 | * that does not cross page boundraries (see host code).
|
---|
| 2872 | */
|
---|
| 2873 | if (cbToWrite <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) /* see allocator */) {
|
---|
| 2874 | uint32_t const cbReq = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + cbToWrite;
|
---|
| 2875 | VBOXSFWRITEEMBEDDEDREQ *pReq = (VBOXSFWRITEEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
|
---|
| 2876 | if (pReq) {
|
---|
| 2877 | if ((PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
|
---|
| 2878 | ssize_t cbRet;
|
---|
| 2879 | if (copy_from_iter(pReq->abData, cbToWrite, iter) == cbToWrite) {
|
---|
[77951] | 2880 | int vrc = VbglR0SfHostReqWriteEmbedded(pSuperInfo->map.root, pReq, sf_r->Handle.hHost,
|
---|
[77631] | 2881 | offFile, (uint32_t)cbToWrite);
|
---|
[77959] | 2882 | sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
|
---|
[77631] | 2883 | if (RT_SUCCESS(vrc)) {
|
---|
| 2884 | cbRet = pReq->Parms.cb32Write.u.value32;
|
---|
| 2885 | AssertStmt(cbRet <= (ssize_t)cbToWrite, cbRet = cbToWrite);
|
---|
[77770] | 2886 | vbsf_reg_write_sync_page_cache(mapping, offFile, (uint32_t)cbRet, pReq->abData,
|
---|
[77853] | 2887 | NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/);
|
---|
[77976] | 2888 |
|
---|
| 2889 | offFile += cbRet;
|
---|
| 2890 | if (fAppend && (g_fSfFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET))
|
---|
| 2891 | offFile = pReq->Parms.off64Write.u.value64;
|
---|
| 2892 | kio->ki_pos = offFile;
|
---|
[77631] | 2893 | if (offFile > i_size_read(inode))
|
---|
| 2894 | i_size_write(inode, offFile);
|
---|
[77976] | 2895 |
|
---|
[85698] | 2896 | # if RTLNX_VER_MIN(4,11,0)
|
---|
[77631] | 2897 | if ((size_t)cbRet < cbToWrite)
|
---|
| 2898 | iov_iter_revert(iter, cbToWrite - cbRet);
|
---|
| 2899 | # endif
|
---|
| 2900 | } else
|
---|
| 2901 | cbRet = -EPROTO;
|
---|
| 2902 | sf_i->force_restat = 1; /* mtime (and size) may have changed */
|
---|
| 2903 | } else
|
---|
| 2904 | cbRet = -EFAULT;
|
---|
| 2905 | VbglR0PhysHeapFree(pReq);
|
---|
| 2906 | SFLOGFLOW(("vbsf_reg_write_iter: returns %#zx (%zd)\n", cbRet, cbRet));
|
---|
| 2907 | return cbRet;
|
---|
| 2908 | }
|
---|
| 2909 | VbglR0PhysHeapFree(pReq);
|
---|
| 2910 | }
|
---|
| 2911 | }
|
---|
| 2912 |
|
---|
| 2913 | /*
|
---|
| 2914 | * Otherwise do the page locking thing.
|
---|
| 2915 | */
|
---|
[77976] | 2916 | return vbsf_reg_write_iter_locking(kio, iter, cbToWrite, offFile, pSuperInfo, sf_r, inode, sf_i, mapping, fAppend);
|
---|
[77626] | 2917 | }
|
---|
| 2918 |
|
---|
[77873] | 2919 | #endif /* >= 2.6.19 */
|
---|
[77626] | 2920 |
|
---|
| 2921 | /**
|
---|
[77704] | 2922 | * Used by vbsf_reg_open() and vbsf_inode_atomic_open() to
|
---|
| 2923 | *
|
---|
| 2924 | * @returns shared folders create flags.
|
---|
| 2925 | * @param fLnxOpen The linux O_XXX flags to convert.
|
---|
| 2926 | * @param pfHandle Pointer to vbsf_handle::fFlags.
|
---|
| 2927 | * @param pszCaller Caller, for logging purposes.
|
---|
| 2928 | */
|
---|
| 2929 | uint32_t vbsf_linux_oflags_to_vbox(unsigned fLnxOpen, uint32_t *pfHandle, const char *pszCaller)
|
---|
| 2930 | {
|
---|
| 2931 | uint32_t fVBoxFlags = SHFL_CF_ACCESS_DENYNONE;
|
---|
| 2932 |
|
---|
| 2933 | /*
|
---|
| 2934 | * Disposition.
|
---|
| 2935 | */
|
---|
| 2936 | if (fLnxOpen & O_CREAT) {
|
---|
| 2937 | Log(("%s: O_CREAT set\n", pszCaller));
|
---|
| 2938 | fVBoxFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
|
---|
| 2939 | if (fLnxOpen & O_EXCL) {
|
---|
| 2940 | Log(("%s: O_EXCL set\n", pszCaller));
|
---|
| 2941 | fVBoxFlags |= SHFL_CF_ACT_FAIL_IF_EXISTS;
|
---|
| 2942 | } else if (fLnxOpen & O_TRUNC) {
|
---|
| 2943 | Log(("%s: O_TRUNC set\n", pszCaller));
|
---|
| 2944 | fVBoxFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
|
---|
| 2945 | } else
|
---|
| 2946 | fVBoxFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
|
---|
| 2947 | } else {
|
---|
| 2948 | fVBoxFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
|
---|
| 2949 | if (fLnxOpen & O_TRUNC) {
|
---|
| 2950 | Log(("%s: O_TRUNC set\n", pszCaller));
|
---|
| 2951 | fVBoxFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
|
---|
| 2952 | }
|
---|
| 2953 | }
|
---|
| 2954 |
|
---|
| 2955 | /*
|
---|
| 2956 | * Access.
|
---|
| 2957 | */
|
---|
| 2958 | switch (fLnxOpen & O_ACCMODE) {
|
---|
| 2959 | case O_RDONLY:
|
---|
| 2960 | fVBoxFlags |= SHFL_CF_ACCESS_READ;
|
---|
| 2961 | *pfHandle |= VBSF_HANDLE_F_READ;
|
---|
| 2962 | break;
|
---|
| 2963 |
|
---|
| 2964 | case O_WRONLY:
|
---|
| 2965 | fVBoxFlags |= SHFL_CF_ACCESS_WRITE;
|
---|
| 2966 | *pfHandle |= VBSF_HANDLE_F_WRITE;
|
---|
| 2967 | break;
|
---|
| 2968 |
|
---|
| 2969 | case O_RDWR:
|
---|
| 2970 | fVBoxFlags |= SHFL_CF_ACCESS_READWRITE;
|
---|
| 2971 | *pfHandle |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE;
|
---|
| 2972 | break;
|
---|
| 2973 |
|
---|
| 2974 | default:
|
---|
| 2975 | BUG();
|
---|
| 2976 | }
|
---|
| 2977 |
|
---|
| 2978 | if (fLnxOpen & O_APPEND) {
|
---|
| 2979 | Log(("%s: O_APPEND set\n", pszCaller));
|
---|
| 2980 | fVBoxFlags |= SHFL_CF_ACCESS_APPEND;
|
---|
| 2981 | *pfHandle |= VBSF_HANDLE_F_APPEND;
|
---|
| 2982 | }
|
---|
| 2983 |
|
---|
| 2984 | /*
|
---|
| 2985 | * Only directories?
|
---|
| 2986 | */
|
---|
| 2987 | if (fLnxOpen & O_DIRECTORY) {
|
---|
| 2988 | Log(("%s: O_DIRECTORY set\n", pszCaller));
|
---|
| 2989 | fVBoxFlags |= SHFL_CF_DIRECTORY;
|
---|
| 2990 | }
|
---|
| 2991 |
|
---|
| 2992 | return fVBoxFlags;
|
---|
| 2993 | }
|
---|
| 2994 |
|
---|
| 2995 |
|
---|
| 2996 | /**
|
---|
[30175] | 2997 | * Open a regular file.
|
---|
| 2998 | *
|
---|
| 2999 | * @param inode the inode
|
---|
| 3000 | * @param file the file
|
---|
| 3001 | * @returns 0 on success, Linux error code otherwise
|
---|
| 3002 | */
|
---|
[77529] | 3003 | static int vbsf_reg_open(struct inode *inode, struct file *file)
|
---|
[2614] | 3004 | {
|
---|
[77526] | 3005 | int rc, rc_linux = 0;
|
---|
[77951] | 3006 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
| 3007 | struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
|
---|
| 3008 | struct dentry *dentry = VBSF_GET_F_DENTRY(file);
|
---|
[77873] | 3009 | struct vbsf_reg_info *sf_r;
|
---|
| 3010 | VBOXSFCREATEREQ *pReq;
|
---|
[2614] | 3011 |
|
---|
[77532] | 3012 | SFLOGFLOW(("vbsf_reg_open: inode=%p file=%p flags=%#x %s\n", inode, file, file->f_flags, sf_i ? sf_i->path->String.ach : NULL));
|
---|
[77951] | 3013 | Assert(pSuperInfo);
|
---|
| 3014 | Assert(sf_i);
|
---|
[2614] | 3015 |
|
---|
[77526] | 3016 | sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
|
---|
| 3017 | if (!sf_r) {
|
---|
| 3018 | LogRelFunc(("could not allocate reg info\n"));
|
---|
| 3019 | return -ENOMEM;
|
---|
| 3020 | }
|
---|
[2614] | 3021 |
|
---|
[77526] | 3022 | RTListInit(&sf_r->Handle.Entry);
|
---|
| 3023 | sf_r->Handle.cRefs = 1;
|
---|
[77536] | 3024 | sf_r->Handle.fFlags = VBSF_HANDLE_F_FILE | VBSF_HANDLE_F_MAGIC;
|
---|
[77526] | 3025 | sf_r->Handle.hHost = SHFL_HANDLE_NIL;
|
---|
[77458] | 3026 |
|
---|
[77526] | 3027 | /* Already open? */
|
---|
| 3028 | if (sf_i->handle != SHFL_HANDLE_NIL) {
|
---|
| 3029 | /*
|
---|
[77529] | 3030 | * This inode was created with vbsf_create_worker(). Check the CreateFlags:
|
---|
[77526] | 3031 | * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure
|
---|
| 3032 | * about the access flags (SHFL_CF_ACCESS_*).
|
---|
| 3033 | */
|
---|
| 3034 | sf_i->force_restat = 1;
|
---|
| 3035 | sf_r->Handle.hHost = sf_i->handle;
|
---|
| 3036 | sf_i->handle = SHFL_HANDLE_NIL;
|
---|
| 3037 | file->private_data = sf_r;
|
---|
[77458] | 3038 |
|
---|
[77536] | 3039 | sf_r->Handle.fFlags |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE; /** @todo fix */
|
---|
[77529] | 3040 | vbsf_handle_append(sf_i, &sf_r->Handle);
|
---|
| 3041 | SFLOGFLOW(("vbsf_reg_open: returns 0 (#1) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost));
|
---|
[77526] | 3042 | return 0;
|
---|
| 3043 | }
|
---|
[28253] | 3044 |
|
---|
[77526] | 3045 | pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq) + sf_i->path->u16Size);
|
---|
| 3046 | if (!pReq) {
|
---|
| 3047 | kfree(sf_r);
|
---|
| 3048 | LogRelFunc(("Failed to allocate a VBOXSFCREATEREQ buffer!\n"));
|
---|
| 3049 | return -ENOMEM;
|
---|
| 3050 | }
|
---|
| 3051 | memcpy(&pReq->StrPath, sf_i->path, SHFLSTRING_HEADER_SIZE + sf_i->path->u16Size);
|
---|
| 3052 | RT_ZERO(pReq->CreateParms);
|
---|
[77532] | 3053 | pReq->CreateParms.Handle = SHFL_HANDLE_NIL;
|
---|
[77054] | 3054 |
|
---|
[77532] | 3055 | /* We check the value of pReq->CreateParms.Handle afterwards to
|
---|
| 3056 | * find out if the call succeeded or failed, as the API does not seem
|
---|
| 3057 | * to cleanly distinguish error and informational messages.
|
---|
[77526] | 3058 | *
|
---|
[77532] | 3059 | * Furthermore, we must set pReq->CreateParms.Handle to SHFL_HANDLE_NIL
|
---|
| 3060 | * to make the shared folders host service use our fMode parameter */
|
---|
[2614] | 3061 |
|
---|
[77704] | 3062 | /* We ignore O_EXCL, as the Linux kernel seems to call create
|
---|
| 3063 | beforehand itself, so O_EXCL should always fail. */
|
---|
| 3064 | pReq->CreateParms.CreateFlags = vbsf_linux_oflags_to_vbox(file->f_flags & ~O_EXCL, &sf_r->Handle.fFlags, __FUNCTION__);
|
---|
[77532] | 3065 | pReq->CreateParms.Info.Attr.fMode = inode->i_mode;
|
---|
| 3066 | LogFunc(("vbsf_reg_open: calling VbglR0SfHostReqCreate, file %s, flags=%#x, %#x\n",
|
---|
| 3067 | sf_i->path->String.utf8, file->f_flags, pReq->CreateParms.CreateFlags));
|
---|
[77951] | 3068 | rc = VbglR0SfHostReqCreate(pSuperInfo->map.root, pReq);
|
---|
[77526] | 3069 | if (RT_FAILURE(rc)) {
|
---|
[77532] | 3070 | LogFunc(("VbglR0SfHostReqCreate failed flags=%d,%#x rc=%Rrc\n", file->f_flags, pReq->CreateParms.CreateFlags, rc));
|
---|
[77526] | 3071 | kfree(sf_r);
|
---|
| 3072 | VbglR0PhysHeapFree(pReq);
|
---|
| 3073 | return -RTErrConvertToErrno(rc);
|
---|
| 3074 | }
|
---|
[2614] | 3075 |
|
---|
[77532] | 3076 | if (pReq->CreateParms.Handle != SHFL_HANDLE_NIL) {
|
---|
[77529] | 3077 | vbsf_dentry_chain_increase_ttl(dentry);
|
---|
[77976] | 3078 | vbsf_update_inode(inode, sf_i, &pReq->CreateParms.Info, pSuperInfo, false /*fInodeLocked*/, 0 /*fSetAttrs*/);
|
---|
[77526] | 3079 | rc_linux = 0;
|
---|
| 3080 | } else {
|
---|
[77532] | 3081 | switch (pReq->CreateParms.Result) {
|
---|
| 3082 | case SHFL_PATH_NOT_FOUND:
|
---|
[77976] | 3083 | vbsf_dentry_invalidate_ttl(dentry);
|
---|
[77532] | 3084 | rc_linux = -ENOENT;
|
---|
| 3085 | break;
|
---|
| 3086 | case SHFL_FILE_NOT_FOUND:
|
---|
[77976] | 3087 | vbsf_dentry_invalidate_ttl(dentry);
|
---|
[77532] | 3088 | /** @todo sf_dentry_increase_parent_ttl(file->f_dentry); if we can trust it. */
|
---|
| 3089 | rc_linux = -ENOENT;
|
---|
| 3090 | break;
|
---|
| 3091 | case SHFL_FILE_EXISTS:
|
---|
| 3092 | vbsf_dentry_chain_increase_ttl(dentry);
|
---|
[77976] | 3093 | vbsf_update_inode(inode, sf_i, &pReq->CreateParms.Info, pSuperInfo, false /*fInodeLocked*/, 0 /*fSetAttrs*/);
|
---|
[77532] | 3094 | rc_linux = -EEXIST;
|
---|
| 3095 | break;
|
---|
| 3096 | default:
|
---|
| 3097 | vbsf_dentry_chain_increase_parent_ttl(dentry);
|
---|
| 3098 | rc_linux = 0;
|
---|
| 3099 | break;
|
---|
[77526] | 3100 | }
|
---|
| 3101 | }
|
---|
[2614] | 3102 |
|
---|
[77532] | 3103 | sf_r->Handle.hHost = pReq->CreateParms.Handle;
|
---|
[77526] | 3104 | file->private_data = sf_r;
|
---|
[77529] | 3105 | vbsf_handle_append(sf_i, &sf_r->Handle);
|
---|
[77526] | 3106 | VbglR0PhysHeapFree(pReq);
|
---|
[77529] | 3107 | SFLOGFLOW(("vbsf_reg_open: returns 0 (#2) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost));
|
---|
[77526] | 3108 | return rc_linux;
|
---|
[2614] | 3109 | }
|
---|
| 3110 |
|
---|
[77458] | 3111 |
|
---|
[30175] | 3112 | /**
|
---|
| 3113 | * Close a regular file.
|
---|
| 3114 | *
|
---|
| 3115 | * @param inode the inode
|
---|
| 3116 | * @param file the file
|
---|
| 3117 | * @returns 0 on success, Linux error code otherwise
|
---|
| 3118 | */
|
---|
[77529] | 3119 | static int vbsf_reg_release(struct inode *inode, struct file *file)
|
---|
[2614] | 3120 | {
|
---|
[77530] | 3121 | struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
|
---|
[77704] | 3122 | struct vbsf_reg_info *sf_r = file->private_data;
|
---|
[2614] | 3123 |
|
---|
[77529] | 3124 | SFLOGFLOW(("vbsf_reg_release: inode=%p file=%p\n", inode, file));
|
---|
[77704] | 3125 | if (sf_r) {
|
---|
[77951] | 3126 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
[77967] | 3127 | struct address_space *mapping = inode->i_mapping;
|
---|
[77951] | 3128 | Assert(pSuperInfo);
|
---|
[2614] | 3129 |
|
---|
[77967] | 3130 | /* If we're closing the last handle for this inode, make sure the flush
|
---|
| 3131 | the mapping or we'll end up in vbsf_writepage without a handle. */
|
---|
| 3132 | if ( mapping
|
---|
| 3133 | && mapping->nrpages > 0
|
---|
| 3134 | /** @todo && last writable handle */ ) {
|
---|
[85698] | 3135 | #if RTLNX_VER_MIN(2,4,25)
|
---|
[77967] | 3136 | if (filemap_fdatawrite(mapping) != -EIO)
|
---|
| 3137 | #else
|
---|
| 3138 | if ( filemap_fdatasync(mapping) == 0
|
---|
| 3139 | && fsync_inode_data_buffers(inode) == 0)
|
---|
[39789] | 3140 | #endif
|
---|
[77967] | 3141 | filemap_fdatawait(inode->i_mapping);
|
---|
| 3142 | }
|
---|
[2614] | 3143 |
|
---|
[77704] | 3144 | /* Release sf_r, closing the handle if we're the last user. */
|
---|
| 3145 | file->private_data = NULL;
|
---|
[77951] | 3146 | vbsf_handle_release(&sf_r->Handle, pSuperInfo, "vbsf_reg_release");
|
---|
[77458] | 3147 |
|
---|
[77704] | 3148 | sf_i->handle = SHFL_HANDLE_NIL;
|
---|
| 3149 | }
|
---|
[77526] | 3150 | return 0;
|
---|
[2614] | 3151 | }
|
---|
| 3152 |
|
---|
[77873] | 3153 |
|
---|
[77419] | 3154 | /**
|
---|
| 3155 | * Wrapper around generic/default seek function that ensures that we've got
|
---|
| 3156 | * the up-to-date file size when doing anything relative to EOF.
|
---|
| 3157 | *
|
---|
| 3158 | * The issue is that the host may extend the file while we weren't looking and
|
---|
| 3159 | * if the caller wishes to append data, it may end up overwriting existing data
|
---|
| 3160 | * if we operate with a stale size. So, we always retrieve the file size on EOF
|
---|
| 3161 | * relative seeks.
|
---|
| 3162 | */
|
---|
[77529] | 3163 | static loff_t vbsf_reg_llseek(struct file *file, loff_t off, int whence)
|
---|
[2614] | 3164 | {
|
---|
[77529] | 3165 | SFLOGFLOW(("vbsf_reg_llseek: file=%p off=%lld whence=%d\n", file, off, whence));
|
---|
[77458] | 3166 |
|
---|
[77526] | 3167 | switch (whence) {
|
---|
[77419] | 3168 | #ifdef SEEK_HOLE
|
---|
[77526] | 3169 | case SEEK_HOLE:
|
---|
| 3170 | case SEEK_DATA:
|
---|
[65992] | 3171 | #endif
|
---|
[77526] | 3172 | case SEEK_END: {
|
---|
[77530] | 3173 | struct vbsf_reg_info *sf_r = file->private_data;
|
---|
[77559] | 3174 | int rc = vbsf_inode_revalidate_with_handle(VBSF_GET_F_DENTRY(file), sf_r->Handle.hHost,
|
---|
| 3175 | true /*fForce*/, false /*fInodeLocked*/);
|
---|
[77526] | 3176 | if (rc == 0)
|
---|
| 3177 | break;
|
---|
| 3178 | return rc;
|
---|
| 3179 | }
|
---|
| 3180 | }
|
---|
[2614] | 3181 |
|
---|
[85698] | 3182 | #if RTLNX_VER_MIN(2,4,8)
|
---|
[77526] | 3183 | return generic_file_llseek(file, off, whence);
|
---|
[9179] | 3184 | #else
|
---|
[77526] | 3185 | return default_llseek(file, off, whence);
|
---|
[9179] | 3186 | #endif
|
---|
[77419] | 3187 | }
|
---|
[2614] | 3188 |
|
---|
[77873] | 3189 |
|
---|
[77419] | 3190 | /**
|
---|
| 3191 | * Flush region of file - chiefly mmap/msync.
|
---|
| 3192 | *
|
---|
| 3193 | * We cannot use the noop_fsync / simple_sync_file here as that means
|
---|
| 3194 | * msync(,,MS_SYNC) will return before the data hits the host, thereby
|
---|
| 3195 | * causing coherency issues with O_DIRECT access to the same file as
|
---|
| 3196 | * well as any host interaction with the file.
|
---|
| 3197 | */
|
---|
[85698] | 3198 | #if RTLNX_VER_MIN(3,1,0) \
|
---|
| 3199 | || (defined(CONFIG_SUSE_KERNEL) && RTLNX_VER_MIN(3,0,101) /** @todo figure when exactly */)
|
---|
[77529] | 3200 | static int vbsf_reg_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
---|
[77419] | 3201 | {
|
---|
[85698] | 3202 | # if RTLNX_VER_MIN(3,16,0)
|
---|
[77526] | 3203 | return __generic_file_fsync(file, start, end, datasync);
|
---|
[77419] | 3204 | # else
|
---|
[77526] | 3205 | return generic_file_fsync(file, start, end, datasync);
|
---|
[77419] | 3206 | # endif
|
---|
[2614] | 3207 | }
|
---|
[85698] | 3208 | #elif RTLNX_VER_MIN(2,6,35)
|
---|
[77529] | 3209 | static int vbsf_reg_fsync(struct file *file, int datasync)
|
---|
[77419] | 3210 | {
|
---|
[77526] | 3211 | return generic_file_fsync(file, datasync);
|
---|
[77419] | 3212 | }
|
---|
| 3213 | #else /* < 2.6.35 */
|
---|
[77529] | 3214 | static int vbsf_reg_fsync(struct file *file, struct dentry *dentry, int datasync)
|
---|
[77419] | 3215 | {
|
---|
[85698] | 3216 | # if RTLNX_VER_MIN(2,6,31)
|
---|
[77526] | 3217 | return simple_fsync(file, dentry, datasync);
|
---|
[77419] | 3218 | # else
|
---|
[77526] | 3219 | int rc;
|
---|
| 3220 | struct inode *inode = dentry->d_inode;
|
---|
| 3221 | AssertReturn(inode, -EINVAL);
|
---|
[2614] | 3222 |
|
---|
[77526] | 3223 | /** @todo What about file_fsync()? (<= 2.5.11) */
|
---|
[2614] | 3224 |
|
---|
[85698] | 3225 | # if RTLNX_VER_MIN(2,5,12)
|
---|
[77526] | 3226 | rc = sync_mapping_buffers(inode->i_mapping);
|
---|
| 3227 | if ( rc == 0
|
---|
| 3228 | && (inode->i_state & I_DIRTY)
|
---|
| 3229 | && ((inode->i_state & I_DIRTY_DATASYNC) || !datasync)
|
---|
| 3230 | ) {
|
---|
| 3231 | struct writeback_control wbc = {
|
---|
| 3232 | .sync_mode = WB_SYNC_ALL,
|
---|
| 3233 | .nr_to_write = 0
|
---|
| 3234 | };
|
---|
| 3235 | rc = sync_inode(inode, &wbc);
|
---|
| 3236 | }
|
---|
[77419] | 3237 | # else /* < 2.5.12 */
|
---|
[77967] | 3238 | /** @todo
|
---|
| 3239 | * Somethings is buggy here or in the 2.4.21-27.EL kernel I'm testing on.
|
---|
| 3240 | *
|
---|
| 3241 | * In theory we shouldn't need to do anything here, since msync will call
|
---|
| 3242 | * writepage() on each dirty page and we write them out synchronously. So, the
|
---|
| 3243 | * problem is elsewhere... Doesn't happen all the time either. Sigh.
|
---|
| 3244 | */
|
---|
| 3245 | rc = fsync_inode_buffers(inode);
|
---|
[85698] | 3246 | # if RTLNX_VER_MIN(2,4,10)
|
---|
[77967] | 3247 | if (rc == 0 && datasync)
|
---|
| 3248 | rc = fsync_inode_data_buffers(inode);
|
---|
[77419] | 3249 | # endif
|
---|
[77967] | 3250 |
|
---|
[77419] | 3251 | # endif /* < 2.5.12 */
|
---|
[77526] | 3252 | return rc;
|
---|
[77419] | 3253 | # endif
|
---|
[2614] | 3254 | }
|
---|
[77419] | 3255 | #endif /* < 2.6.35 */
|
---|
[2614] | 3256 |
|
---|
[77419] | 3257 |
|
---|
[85698] | 3258 | #if RTLNX_VER_MIN(4,5,0)
|
---|
[77853] | 3259 | /**
|
---|
| 3260 | * Copy a datablock from one file to another on the host side.
|
---|
| 3261 | */
|
---|
| 3262 | static ssize_t vbsf_reg_copy_file_range(struct file *pFileSrc, loff_t offSrc, struct file *pFileDst, loff_t offDst,
|
---|
| 3263 | size_t cbRange, unsigned int fFlags)
|
---|
| 3264 | {
|
---|
| 3265 | ssize_t cbRet;
|
---|
| 3266 | if (g_uSfLastFunction >= SHFL_FN_COPY_FILE_PART) {
|
---|
| 3267 | struct inode *pInodeSrc = pFileSrc->f_inode;
|
---|
| 3268 | struct vbsf_inode_info *pInodeInfoSrc = VBSF_GET_INODE_INFO(pInodeSrc);
|
---|
| 3269 | struct vbsf_super_info *pSuperInfoSrc = VBSF_GET_SUPER_INFO(pInodeSrc->i_sb);
|
---|
| 3270 | struct vbsf_reg_info *pFileInfoSrc = (struct vbsf_reg_info *)pFileSrc->private_data;
|
---|
| 3271 | struct inode *pInodeDst = pInodeSrc;
|
---|
| 3272 | struct vbsf_inode_info *pInodeInfoDst = VBSF_GET_INODE_INFO(pInodeDst);
|
---|
| 3273 | struct vbsf_super_info *pSuperInfoDst = VBSF_GET_SUPER_INFO(pInodeDst->i_sb);
|
---|
| 3274 | struct vbsf_reg_info *pFileInfoDst = (struct vbsf_reg_info *)pFileDst->private_data;
|
---|
| 3275 | VBOXSFCOPYFILEPARTREQ *pReq;
|
---|
| 3276 |
|
---|
| 3277 | /*
|
---|
| 3278 | * Some extra validation.
|
---|
| 3279 | */
|
---|
| 3280 | AssertPtrReturn(pInodeInfoSrc, -EOPNOTSUPP);
|
---|
| 3281 | Assert(pInodeInfoSrc->u32Magic == SF_INODE_INFO_MAGIC);
|
---|
| 3282 | AssertPtrReturn(pInodeInfoDst, -EOPNOTSUPP);
|
---|
| 3283 | Assert(pInodeInfoDst->u32Magic == SF_INODE_INFO_MAGIC);
|
---|
| 3284 |
|
---|
[85698] | 3285 | # if RTLNX_VER_MAX(4,11,0)
|
---|
[77853] | 3286 | if (!S_ISREG(pInodeSrc->i_mode) || !S_ISREG(pInodeDst->i_mode))
|
---|
| 3287 | return S_ISDIR(pInodeSrc->i_mode) || S_ISDIR(pInodeDst->i_mode) ? -EISDIR : -EINVAL;
|
---|
| 3288 | # endif
|
---|
| 3289 |
|
---|
| 3290 | /*
|
---|
| 3291 | * Allocate the request and issue it.
|
---|
| 3292 | */
|
---|
| 3293 | pReq = (VBOXSFCOPYFILEPARTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
|
---|
| 3294 | if (pReq) {
|
---|
| 3295 | int vrc = VbglR0SfHostReqCopyFilePart(pSuperInfoSrc->map.root, pFileInfoSrc->Handle.hHost, offSrc,
|
---|
| 3296 | pSuperInfoDst->map.root, pFileInfoDst->Handle.hHost, offDst,
|
---|
| 3297 | cbRange, 0 /*fFlags*/, pReq);
|
---|
| 3298 | if (RT_SUCCESS(vrc))
|
---|
| 3299 | cbRet = pReq->Parms.cb64ToCopy.u.value64;
|
---|
| 3300 | else if (vrc == VERR_NOT_IMPLEMENTED)
|
---|
| 3301 | cbRet = -EOPNOTSUPP;
|
---|
| 3302 | else
|
---|
| 3303 | cbRet = -RTErrConvertToErrno(vrc);
|
---|
| 3304 |
|
---|
| 3305 | VbglR0PhysHeapFree(pReq);
|
---|
| 3306 | } else
|
---|
| 3307 | cbRet = -ENOMEM;
|
---|
| 3308 | } else {
|
---|
| 3309 | cbRet = -EOPNOTSUPP;
|
---|
| 3310 | }
|
---|
| 3311 | SFLOGFLOW(("vbsf_reg_copy_file_range: returns %zd\n", cbRet));
|
---|
| 3312 | return cbRet;
|
---|
| 3313 | }
|
---|
| 3314 | #endif /* > 4.5 */
|
---|
| 3315 |
|
---|
[77873] | 3316 |
|
---|
[77731] | 3317 | #ifdef SFLOG_ENABLED
|
---|
| 3318 | /*
|
---|
| 3319 | * This is just for logging page faults and such.
|
---|
| 3320 | */
|
---|
| 3321 |
|
---|
| 3322 | /** Pointer to the ops generic_file_mmap returns the first time it's called. */
|
---|
| 3323 | static struct vm_operations_struct const *g_pGenericFileVmOps = NULL;
|
---|
| 3324 | /** Merge of g_LoggingVmOpsTemplate and g_pGenericFileVmOps. */
|
---|
| 3325 | static struct vm_operations_struct g_LoggingVmOps;
|
---|
| 3326 |
|
---|
| 3327 |
|
---|
| 3328 | /* Generic page fault callback: */
|
---|
[85698] | 3329 | # if RTLNX_VER_MIN(4,11,0)
|
---|
[77731] | 3330 | static vm_fault_t vbsf_vmlog_fault(struct vm_fault *vmf)
|
---|
| 3331 | {
|
---|
| 3332 | vm_fault_t rc;
|
---|
| 3333 | SFLOGFLOW(("vbsf_vmlog_fault: vmf=%p flags=%#x addr=%p\n", vmf, vmf->flags, vmf->address));
|
---|
| 3334 | rc = g_pGenericFileVmOps->fault(vmf);
|
---|
| 3335 | SFLOGFLOW(("vbsf_vmlog_fault: returns %d\n", rc));
|
---|
| 3336 | return rc;
|
---|
| 3337 | }
|
---|
[85698] | 3338 | # elif RTLNX_VER_MIN(2,6,23)
|
---|
[77731] | 3339 | static int vbsf_vmlog_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
---|
| 3340 | {
|
---|
| 3341 | int rc;
|
---|
[85698] | 3342 | # if RTLNX_VER_MIN(4,10,0)
|
---|
[77731] | 3343 | SFLOGFLOW(("vbsf_vmlog_fault: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->address));
|
---|
| 3344 | # else
|
---|
| 3345 | SFLOGFLOW(("vbsf_vmlog_fault: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->virtual_address));
|
---|
| 3346 | # endif
|
---|
| 3347 | rc = g_pGenericFileVmOps->fault(vma, vmf);
|
---|
| 3348 | SFLOGFLOW(("vbsf_vmlog_fault: returns %d\n", rc));
|
---|
| 3349 | return rc;
|
---|
| 3350 | }
|
---|
| 3351 | # endif
|
---|
| 3352 |
|
---|
| 3353 |
|
---|
| 3354 | /* Special/generic page fault handler: */
|
---|
[85698] | 3355 | # if RTLNX_VER_MIN(2,6,26)
|
---|
| 3356 | # elif RTLNX_VER_MIN(2,6,1)
|
---|
[77731] | 3357 | static struct page *vbsf_vmlog_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
|
---|
| 3358 | {
|
---|
| 3359 | struct page *page;
|
---|
| 3360 | SFLOGFLOW(("vbsf_vmlog_nopage: vma=%p address=%p type=%p:{%#x}\n", vma, address, type, type ? *type : 0));
|
---|
| 3361 | page = g_pGenericFileVmOps->nopage(vma, address, type);
|
---|
| 3362 | SFLOGFLOW(("vbsf_vmlog_nopage: returns %p\n", page));
|
---|
| 3363 | return page;
|
---|
| 3364 | }
|
---|
| 3365 | # else
|
---|
| 3366 | static struct page *vbsf_vmlog_nopage(struct vm_area_struct *vma, unsigned long address, int write_access_or_unused)
|
---|
| 3367 | {
|
---|
| 3368 | struct page *page;
|
---|
| 3369 | SFLOGFLOW(("vbsf_vmlog_nopage: vma=%p address=%p wau=%d\n", vma, address, write_access_or_unused));
|
---|
| 3370 | page = g_pGenericFileVmOps->nopage(vma, address, write_access_or_unused);
|
---|
| 3371 | SFLOGFLOW(("vbsf_vmlog_nopage: returns %p\n", page));
|
---|
| 3372 | return page;
|
---|
| 3373 | }
|
---|
| 3374 | # endif /* < 2.6.26 */
|
---|
| 3375 |
|
---|
| 3376 |
|
---|
| 3377 | /* Special page fault callback for making something writable: */
|
---|
[85698] | 3378 | # if RTLNX_VER_MIN(4,11,0)
|
---|
[77731] | 3379 | static vm_fault_t vbsf_vmlog_page_mkwrite(struct vm_fault *vmf)
|
---|
| 3380 | {
|
---|
| 3381 | vm_fault_t rc;
|
---|
| 3382 | SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vmf=%p flags=%#x addr=%p\n", vmf, vmf->flags, vmf->address));
|
---|
| 3383 | rc = g_pGenericFileVmOps->page_mkwrite(vmf);
|
---|
| 3384 | SFLOGFLOW(("vbsf_vmlog_page_mkwrite: returns %d\n", rc));
|
---|
| 3385 | return rc;
|
---|
| 3386 | }
|
---|
[85698] | 3387 | # elif RTLNX_VER_MIN(2,6,30)
|
---|
[77731] | 3388 | static int vbsf_vmlog_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
|
---|
| 3389 | {
|
---|
| 3390 | int rc;
|
---|
[85698] | 3391 | # if RTLNX_VER_MIN(4,10,0)
|
---|
[77975] | 3392 | SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->address));
|
---|
| 3393 | # else
|
---|
[77731] | 3394 | SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->virtual_address));
|
---|
[77975] | 3395 | # endif
|
---|
[77731] | 3396 | rc = g_pGenericFileVmOps->page_mkwrite(vma, vmf);
|
---|
| 3397 | SFLOGFLOW(("vbsf_vmlog_page_mkwrite: returns %d\n", rc));
|
---|
| 3398 | return rc;
|
---|
| 3399 | }
|
---|
[85698] | 3400 | # elif RTLNX_VER_MIN(2,6,18)
|
---|
[77731] | 3401 | static int vbsf_vmlog_page_mkwrite(struct vm_area_struct *vma, struct page *page)
|
---|
| 3402 | {
|
---|
| 3403 | int rc;
|
---|
| 3404 | SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vma=%p page=%p\n", vma, page));
|
---|
| 3405 | rc = g_pGenericFileVmOps->page_mkwrite(vma, page);
|
---|
| 3406 | SFLOGFLOW(("vbsf_vmlog_page_mkwrite: returns %d\n", rc));
|
---|
| 3407 | return rc;
|
---|
| 3408 | }
|
---|
| 3409 | # endif
|
---|
| 3410 |
|
---|
| 3411 |
|
---|
| 3412 | /* Special page fault callback for mapping pages: */
|
---|
[88428] | 3413 | # if RTLNX_VER_MIN(5,12,0)
|
---|
| 3414 | static vm_fault_t vbsf_vmlog_map_pages(struct vm_fault *vmf, pgoff_t start, pgoff_t end)
|
---|
| 3415 | {
|
---|
| 3416 | vm_fault_t rc;
|
---|
| 3417 | SFLOGFLOW(("vbsf_vmlog_map_pages: vmf=%p (flags=%#x addr=%p) start=%p end=%p\n", vmf, vmf->flags, vmf->address, start, end));
|
---|
| 3418 | rc = g_pGenericFileVmOps->map_pages(vmf, start, end);
|
---|
| 3419 | SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
|
---|
| 3420 | return rc;
|
---|
| 3421 | }
|
---|
| 3422 | # elif RTLNX_VER_MIN(4,10,0)
|
---|
[77731] | 3423 | static void vbsf_vmlog_map_pages(struct vm_fault *vmf, pgoff_t start, pgoff_t end)
|
---|
| 3424 | {
|
---|
| 3425 | SFLOGFLOW(("vbsf_vmlog_map_pages: vmf=%p (flags=%#x addr=%p) start=%p end=%p\n", vmf, vmf->flags, vmf->address, start, end));
|
---|
| 3426 | g_pGenericFileVmOps->map_pages(vmf, start, end);
|
---|
| 3427 | SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
|
---|
| 3428 | }
|
---|
[85698] | 3429 | # elif RTLNX_VER_MIN(4,8,0)
|
---|
[77731] | 3430 | static void vbsf_vmlog_map_pages(struct fault_env *fenv, pgoff_t start, pgoff_t end)
|
---|
| 3431 | {
|
---|
[77735] | 3432 | SFLOGFLOW(("vbsf_vmlog_map_pages: fenv=%p (flags=%#x addr=%p) start=%p end=%p\n", fenv, fenv->flags, fenv->address, start, end));
|
---|
[77731] | 3433 | g_pGenericFileVmOps->map_pages(fenv, start, end);
|
---|
| 3434 | SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
|
---|
| 3435 | }
|
---|
[85698] | 3436 | # elif RTLNX_VER_MIN(3,15,0)
|
---|
[77731] | 3437 | static void vbsf_vmlog_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
|
---|
| 3438 | {
|
---|
| 3439 | SFLOGFLOW(("vbsf_vmlog_map_pages: vma=%p vmf=%p (flags=%#x addr=%p)\n", vma, vmf, vmf->flags, vmf->virtual_address));
|
---|
| 3440 | g_pGenericFileVmOps->map_pages(vma, vmf);
|
---|
| 3441 | SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
|
---|
| 3442 | }
|
---|
| 3443 | # endif
|
---|
| 3444 |
|
---|
| 3445 |
|
---|
| 3446 | /** Overload template. */
|
---|
| 3447 | static struct vm_operations_struct const g_LoggingVmOpsTemplate = {
|
---|
[85698] | 3448 | # if RTLNX_VER_MIN(2,6,23)
|
---|
[77731] | 3449 | .fault = vbsf_vmlog_fault,
|
---|
| 3450 | # endif
|
---|
[85698] | 3451 | # if RTLNX_VER_MAX(2,6,26)
|
---|
[77731] | 3452 | .nopage = vbsf_vmlog_nopage,
|
---|
| 3453 | # endif
|
---|
[85698] | 3454 | # if RTLNX_VER_MIN(2,6,18)
|
---|
[77731] | 3455 | .page_mkwrite = vbsf_vmlog_page_mkwrite,
|
---|
| 3456 | # endif
|
---|
[85698] | 3457 | # if RTLNX_VER_MIN(3,15,0)
|
---|
[77731] | 3458 | .map_pages = vbsf_vmlog_map_pages,
|
---|
| 3459 | # endif
|
---|
| 3460 | };
|
---|
| 3461 |
|
---|
| 3462 | /** file_operations::mmap wrapper for logging purposes. */
|
---|
| 3463 | extern int vbsf_reg_mmap(struct file *file, struct vm_area_struct *vma)
|
---|
| 3464 | {
|
---|
| 3465 | int rc;
|
---|
| 3466 | SFLOGFLOW(("vbsf_reg_mmap: file=%p vma=%p\n", file, vma));
|
---|
| 3467 | rc = generic_file_mmap(file, vma);
|
---|
| 3468 | if (rc == 0) {
|
---|
| 3469 | /* Merge the ops and template the first time thru (there's a race here). */
|
---|
| 3470 | if (g_pGenericFileVmOps == NULL) {
|
---|
| 3471 | uintptr_t const *puSrc1 = (uintptr_t *)vma->vm_ops;
|
---|
| 3472 | uintptr_t const *puSrc2 = (uintptr_t *)&g_LoggingVmOpsTemplate;
|
---|
| 3473 | uintptr_t volatile *puDst = (uintptr_t *)&g_LoggingVmOps;
|
---|
| 3474 | size_t cbLeft = sizeof(g_LoggingVmOps) / sizeof(*puDst);
|
---|
| 3475 | while (cbLeft-- > 0) {
|
---|
| 3476 | *puDst = *puSrc2 && *puSrc1 ? *puSrc2 : *puSrc1;
|
---|
| 3477 | puSrc1++;
|
---|
| 3478 | puSrc2++;
|
---|
| 3479 | puDst++;
|
---|
| 3480 | }
|
---|
| 3481 | g_pGenericFileVmOps = vma->vm_ops;
|
---|
| 3482 | vma->vm_ops = &g_LoggingVmOps;
|
---|
| 3483 | } else if (g_pGenericFileVmOps == vma->vm_ops)
|
---|
| 3484 | vma->vm_ops = &g_LoggingVmOps;
|
---|
| 3485 | else
|
---|
| 3486 | SFLOGFLOW(("vbsf_reg_mmap: Warning: vm_ops=%p, expected %p!\n", vma->vm_ops, g_pGenericFileVmOps));
|
---|
| 3487 | }
|
---|
| 3488 | SFLOGFLOW(("vbsf_reg_mmap: returns %d\n", rc));
|
---|
| 3489 | return rc;
|
---|
| 3490 | }
|
---|
| 3491 |
|
---|
| 3492 | #endif /* SFLOG_ENABLED */
|
---|
| 3493 |
|
---|
| 3494 |
|
---|
[77529] | 3495 | /**
|
---|
| 3496 | * File operations for regular files.
|
---|
[77939] | 3497 | *
|
---|
| 3498 | * Note on splice_read/splice_write/sendfile:
|
---|
| 3499 | * - Splice was introduced in 2.6.17. The generic_file_splice_read/write
|
---|
| 3500 | * methods go thru the page cache, which is undesirable and is why we
|
---|
| 3501 | * need to cook our own versions of the code as long as we cannot track
|
---|
| 3502 | * host-side writes and correctly invalidate the guest page-cache.
|
---|
| 3503 | * - Sendfile reimplemented using splice in 2.6.23.
|
---|
| 3504 | * - The default_file_splice_read/write no-page-cache fallback functions,
|
---|
[77943] | 3505 | * were introduced in 2.6.31. The write one work in page units.
|
---|
| 3506 | * - Since linux 3.16 there is iter_file_splice_write that uses iter_write.
|
---|
| 3507 | * - Since linux 4.9 the generic_file_splice_read function started using
|
---|
| 3508 | * read_iter.
|
---|
[77529] | 3509 | */
|
---|
| 3510 | struct file_operations vbsf_reg_fops = {
|
---|
[77853] | 3511 | .open = vbsf_reg_open,
|
---|
[88716] | 3512 | #if RTLNX_VER_MAX(5,10,0) /* No regular .read/.write for 5.10, only .read_iter/.write_iter or in-kernel reads/writes fail. */
|
---|
[77853] | 3513 | .read = vbsf_reg_read,
|
---|
| 3514 | .write = vbsf_reg_write,
|
---|
[88571] | 3515 | #endif
|
---|
[85698] | 3516 | #if RTLNX_VER_MIN(3,16,0)
|
---|
[77853] | 3517 | .read_iter = vbsf_reg_read_iter,
|
---|
| 3518 | .write_iter = vbsf_reg_write_iter,
|
---|
[85698] | 3519 | #elif RTLNX_VER_MIN(2,6,19)
|
---|
[77873] | 3520 | .aio_read = vbsf_reg_aio_read,
|
---|
| 3521 | .aio_write = vbsf_reg_aio_write,
|
---|
[77626] | 3522 | #endif
|
---|
[77853] | 3523 | .release = vbsf_reg_release,
|
---|
[77731] | 3524 | #ifdef SFLOG_ENABLED
|
---|
[77853] | 3525 | .mmap = vbsf_reg_mmap,
|
---|
[77731] | 3526 | #else
|
---|
[77853] | 3527 | .mmap = generic_file_mmap,
|
---|
[77731] | 3528 | #endif
|
---|
[85698] | 3529 | #if RTLNX_VER_RANGE(2,6,17, 2,6,31)
|
---|
[77853] | 3530 | .splice_read = vbsf_splice_read,
|
---|
[77943] | 3531 | #endif
|
---|
[85698] | 3532 | #if RTLNX_VER_MIN(3,16,0)
|
---|
[77943] | 3533 | .splice_write = iter_file_splice_write,
|
---|
[85698] | 3534 | #elif RTLNX_VER_MIN(2,6,17)
|
---|
[77942] | 3535 | .splice_write = vbsf_splice_write,
|
---|
[70786] | 3536 | #endif
|
---|
[85698] | 3537 | #if RTLNX_VER_RANGE(2,5,30, 2,6,23)
|
---|
[77944] | 3538 | .sendfile = vbsf_reg_sendfile,
|
---|
[77939] | 3539 | #endif
|
---|
[77853] | 3540 | .llseek = vbsf_reg_llseek,
|
---|
| 3541 | .fsync = vbsf_reg_fsync,
|
---|
[85698] | 3542 | #if RTLNX_VER_MIN(4,5,0)
|
---|
[77853] | 3543 | .copy_file_range = vbsf_reg_copy_file_range,
|
---|
| 3544 | #endif
|
---|
[2614] | 3545 | };
|
---|
| 3546 |
|
---|
[77873] | 3547 |
|
---|
| 3548 | /**
|
---|
| 3549 | * Inodes operations for regular files.
|
---|
| 3550 | */
|
---|
[77529] | 3551 | struct inode_operations vbsf_reg_iops = {
|
---|
[85698] | 3552 | #if RTLNX_VER_MIN(2,5,18)
|
---|
[77873] | 3553 | .getattr = vbsf_inode_getattr,
|
---|
[77559] | 3554 | #else
|
---|
[77561] | 3555 | .revalidate = vbsf_inode_revalidate,
|
---|
[2614] | 3556 | #endif
|
---|
[77873] | 3557 | .setattr = vbsf_inode_setattr,
|
---|
[2614] | 3558 | };
|
---|
[6054] | 3559 |
|
---|
[77529] | 3560 |
|
---|
[77873] | 3561 |
|
---|
| 3562 | /*********************************************************************************************************************************
|
---|
[77966] | 3563 | * Address Space Operations on Regular Files (for mmap, sendfile, direct I/O) *
|
---|
[77873] | 3564 | *********************************************************************************************************************************/
|
---|
| 3565 |
|
---|
[77420] | 3566 | /**
|
---|
| 3567 | * Used to read the content of a page into the page cache.
|
---|
| 3568 | *
|
---|
| 3569 | * Needed for mmap and reads+writes when the file is mmapped in a
|
---|
| 3570 | * shared+writeable fashion.
|
---|
| 3571 | */
|
---|
[77529] | 3572 | static int vbsf_readpage(struct file *file, struct page *page)
|
---|
[6054] | 3573 | {
|
---|
[77530] | 3574 | struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
|
---|
[77526] | 3575 | int err;
|
---|
[6054] | 3576 |
|
---|
[77529] | 3577 | SFLOGFLOW(("vbsf_readpage: inode=%p file=%p page=%p off=%#llx\n", inode, file, page, (uint64_t)page->index << PAGE_SHIFT));
|
---|
[77549] | 3578 | Assert(PageLocked(page));
|
---|
[77458] | 3579 |
|
---|
[77549] | 3580 | if (PageUptodate(page)) {
|
---|
| 3581 | unlock_page(page);
|
---|
| 3582 | return 0;
|
---|
| 3583 | }
|
---|
| 3584 |
|
---|
[77526] | 3585 | if (!is_bad_inode(inode)) {
|
---|
| 3586 | VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
|
---|
| 3587 | if (pReq) {
|
---|
[77951] | 3588 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
| 3589 | struct vbsf_reg_info *sf_r = file->private_data;
|
---|
[77549] | 3590 | uint32_t cbRead;
|
---|
[77526] | 3591 | int vrc;
|
---|
[77421] | 3592 |
|
---|
[77526] | 3593 | pReq->PgLst.offFirstPage = 0;
|
---|
| 3594 | pReq->PgLst.aPages[0] = page_to_phys(page);
|
---|
[77951] | 3595 | vrc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root,
|
---|
[77526] | 3596 | pReq,
|
---|
| 3597 | sf_r->Handle.hHost,
|
---|
| 3598 | (uint64_t)page->index << PAGE_SHIFT,
|
---|
| 3599 | PAGE_SIZE,
|
---|
| 3600 | 1 /*cPages*/);
|
---|
[77421] | 3601 |
|
---|
[77526] | 3602 | cbRead = pReq->Parms.cb32Read.u.value32;
|
---|
| 3603 | AssertStmt(cbRead <= PAGE_SIZE, cbRead = PAGE_SIZE);
|
---|
| 3604 | VbglR0PhysHeapFree(pReq);
|
---|
[6054] | 3605 |
|
---|
[77526] | 3606 | if (RT_SUCCESS(vrc)) {
|
---|
| 3607 | if (cbRead == PAGE_SIZE) {
|
---|
| 3608 | /* likely */
|
---|
| 3609 | } else {
|
---|
| 3610 | uint8_t *pbMapped = (uint8_t *)kmap(page);
|
---|
| 3611 | RT_BZERO(&pbMapped[cbRead], PAGE_SIZE - cbRead);
|
---|
| 3612 | kunmap(page);
|
---|
| 3613 | /** @todo truncate the inode file size? */
|
---|
| 3614 | }
|
---|
[77421] | 3615 |
|
---|
[77526] | 3616 | flush_dcache_page(page);
|
---|
| 3617 | SetPageUptodate(page);
|
---|
[77549] | 3618 | unlock_page(page);
|
---|
| 3619 | return 0;
|
---|
| 3620 | }
|
---|
| 3621 | err = -RTErrConvertToErrno(vrc);
|
---|
[77526] | 3622 | } else
|
---|
| 3623 | err = -ENOMEM;
|
---|
| 3624 | } else
|
---|
| 3625 | err = -EIO;
|
---|
[77549] | 3626 | SetPageError(page);
|
---|
[77526] | 3627 | unlock_page(page);
|
---|
| 3628 | return err;
|
---|
[6054] | 3629 | }
|
---|
| 3630 |
|
---|
[77458] | 3631 |
|
---|
[77439] | 3632 | /**
|
---|
| 3633 | * Used to write out the content of a dirty page cache page to the host file.
|
---|
| 3634 | *
|
---|
| 3635 | * Needed for mmap and writes when the file is mmapped in a shared+writeable
|
---|
| 3636 | * fashion.
|
---|
| 3637 | */
|
---|
[85698] | 3638 | #if RTLNX_VER_MIN(2,5,52)
|
---|
[77529] | 3639 | static int vbsf_writepage(struct page *page, struct writeback_control *wbc)
|
---|
[77966] | 3640 | #else
|
---|
| 3641 | static int vbsf_writepage(struct page *page)
|
---|
| 3642 | #endif
|
---|
[20707] | 3643 | {
|
---|
[77530] | 3644 | struct address_space *mapping = page->mapping;
|
---|
| 3645 | struct inode *inode = mapping->host;
|
---|
| 3646 | struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
|
---|
[77549] | 3647 | struct vbsf_handle *pHandle = vbsf_handle_find(sf_i, VBSF_HANDLE_F_WRITE, VBSF_HANDLE_F_APPEND);
|
---|
[77530] | 3648 | int err;
|
---|
[20707] | 3649 |
|
---|
[77529] | 3650 | SFLOGFLOW(("vbsf_writepage: inode=%p page=%p off=%#llx pHandle=%p (%#llx)\n",
|
---|
[77966] | 3651 | inode, page, (uint64_t)page->index << PAGE_SHIFT, pHandle, pHandle ? pHandle->hHost : 0));
|
---|
[20707] | 3652 |
|
---|
[77526] | 3653 | if (pHandle) {
|
---|
[77951] | 3654 | struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
|
---|
| 3655 | VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
|
---|
[77526] | 3656 | if (pReq) {
|
---|
[77951] | 3657 | uint64_t const cbFile = i_size_read(inode);
|
---|
| 3658 | uint64_t const offInFile = (uint64_t)page->index << PAGE_SHIFT;
|
---|
| 3659 | uint32_t const cbToWrite = page->index != (cbFile >> PAGE_SHIFT) ? PAGE_SIZE
|
---|
| 3660 | : (uint32_t)cbFile & (uint32_t)PAGE_OFFSET_MASK;
|
---|
[77526] | 3661 | int vrc;
|
---|
[20707] | 3662 |
|
---|
[77526] | 3663 | pReq->PgLst.offFirstPage = 0;
|
---|
| 3664 | pReq->PgLst.aPages[0] = page_to_phys(page);
|
---|
[77951] | 3665 | vrc = VbglR0SfHostReqWritePgLst(pSuperInfo->map.root,
|
---|
[77549] | 3666 | pReq,
|
---|
| 3667 | pHandle->hHost,
|
---|
| 3668 | offInFile,
|
---|
| 3669 | cbToWrite,
|
---|
| 3670 | 1 /*cPages*/);
|
---|
[77959] | 3671 | sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
|
---|
[77526] | 3672 | AssertMsgStmt(pReq->Parms.cb32Write.u.value32 == cbToWrite || RT_FAILURE(vrc), /* lazy bird */
|
---|
[77549] | 3673 | ("%#x vs %#x\n", pReq->Parms.cb32Write, cbToWrite),
|
---|
| 3674 | vrc = VERR_WRITE_ERROR);
|
---|
[77526] | 3675 | VbglR0PhysHeapFree(pReq);
|
---|
[20707] | 3676 |
|
---|
[77526] | 3677 | if (RT_SUCCESS(vrc)) {
|
---|
| 3678 | /* Update the inode if we've extended the file. */
|
---|
| 3679 | /** @todo is this necessary given the cbToWrite calc above? */
|
---|
| 3680 | uint64_t const offEndOfWrite = offInFile + cbToWrite;
|
---|
| 3681 | if ( offEndOfWrite > cbFile
|
---|
| 3682 | && offEndOfWrite > i_size_read(inode))
|
---|
| 3683 | i_size_write(inode, offEndOfWrite);
|
---|
[20707] | 3684 |
|
---|
[77967] | 3685 | /* Update and unlock the page. */
|
---|
[77526] | 3686 | if (PageError(page))
|
---|
| 3687 | ClearPageError(page);
|
---|
[77967] | 3688 | SetPageUptodate(page);
|
---|
| 3689 | unlock_page(page);
|
---|
[20707] | 3690 |
|
---|
[77967] | 3691 | vbsf_handle_release(pHandle, pSuperInfo, "vbsf_writepage");
|
---|
| 3692 | return 0;
|
---|
[77526] | 3693 | }
|
---|
[77967] | 3694 |
|
---|
| 3695 | /*
|
---|
| 3696 | * We failed.
|
---|
| 3697 | */
|
---|
| 3698 | err = -EIO;
|
---|
[77526] | 3699 | } else
|
---|
| 3700 | err = -ENOMEM;
|
---|
[77951] | 3701 | vbsf_handle_release(pHandle, pSuperInfo, "vbsf_writepage");
|
---|
[77526] | 3702 | } else {
|
---|
[77967] | 3703 | /** @todo we could re-open the file here and deal with this... */
|
---|
[77526] | 3704 | static uint64_t volatile s_cCalls = 0;
|
---|
| 3705 | if (s_cCalls++ < 16)
|
---|
[77529] | 3706 | printk("vbsf_writepage: no writable handle for %s..\n", sf_i->path->String.ach);
|
---|
[77967] | 3707 | err = -EIO;
|
---|
[77526] | 3708 | }
|
---|
[77967] | 3709 | SetPageError(page);
|
---|
[77526] | 3710 | unlock_page(page);
|
---|
| 3711 | return err;
|
---|
[20707] | 3712 | }
|
---|
| 3713 |
|
---|
[77873] | 3714 |
|
---|
[85698] | 3715 | #if RTLNX_VER_MIN(2,6,24)
|
---|
[77444] | 3716 | /**
|
---|
| 3717 | * Called when writing thru the page cache (which we shouldn't be doing).
|
---|
| 3718 | */
|
---|
[77529] | 3719 | int vbsf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
|
---|
| 3720 | unsigned len, unsigned flags, struct page **pagep, void **fsdata)
|
---|
[20707] | 3721 | {
|
---|
[77526] | 3722 | /** @todo r=bird: We shouldn't ever get here, should we? Because we don't use
|
---|
| 3723 | * the page cache for any writes AFAIK. We could just as well use
|
---|
| 3724 | * simple_write_begin & simple_write_end here if we think we really
|
---|
| 3725 | * need to have non-NULL function pointers in the table... */
|
---|
| 3726 | static uint64_t volatile s_cCalls = 0;
|
---|
| 3727 | if (s_cCalls++ < 16) {
|
---|
[77529] | 3728 | printk("vboxsf: Unexpected call to vbsf_write_begin(pos=%#llx len=%#x flags=%#x)! Please report.\n",
|
---|
[77526] | 3729 | (unsigned long long)pos, len, flags);
|
---|
[77529] | 3730 | RTLogBackdoorPrintf("vboxsf: Unexpected call to vbsf_write_begin(pos=%#llx len=%#x flags=%#x)! Please report.\n",
|
---|
[77967] | 3731 | (unsigned long long)pos, len, flags);
|
---|
[77966] | 3732 | # ifdef WARN_ON
|
---|
[77526] | 3733 | WARN_ON(1);
|
---|
[77966] | 3734 | # endif
|
---|
[77526] | 3735 | }
|
---|
| 3736 | return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
|
---|
[20707] | 3737 | }
|
---|
[77966] | 3738 | #endif /* KERNEL_VERSION >= 2.6.24 */
|
---|
[20707] | 3739 |
|
---|
[77873] | 3740 |
|
---|
[85698] | 3741 | #if RTLNX_VER_MIN(2,4,10)
|
---|
[78135] | 3742 |
|
---|
| 3743 | # ifdef VBOX_UEK
|
---|
| 3744 | # undef iov_iter /* HACK ALERT! Don't put anything needing vbsf_iov_iter after this fun! */
|
---|
| 3745 | # endif
|
---|
| 3746 |
|
---|
[77004] | 3747 | /**
|
---|
| 3748 | * This is needed to make open accept O_DIRECT as well as dealing with direct
|
---|
| 3749 | * I/O requests if we don't intercept them earlier.
|
---|
| 3750 | */
|
---|
[85693] | 3751 | # if RTLNX_VER_MIN(4, 7, 0) \
|
---|
| 3752 | || (defined(CONFIG_SUSE_KERNEL) && RTLNX_VER_RANGE(4,4,73, 4,4,74) /** @todo Figure out when exactly. */) \
|
---|
| 3753 | || (defined(CONFIG_SUSE_KERNEL) && RTLNX_VER_RANGE(4,4,75, 4,4,90) /** @todo Figure out when exactly. */) \
|
---|
| 3754 | || (defined(CONFIG_SUSE_KERNEL) && RTLNX_VER_RANGE(4,4,92, 4,5,0) /** @todo Figure out when exactly. */)
|
---|
[77529] | 3755 | static ssize_t vbsf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
|
---|
[85693] | 3756 | # elif RTLNX_VER_MIN(4, 1, 0)
|
---|
[77529] | 3757 | static ssize_t vbsf_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
|
---|
[85693] | 3758 | # elif RTLNX_VER_MIN(3, 16, 0) || defined(VBOX_UEK)
|
---|
[77529] | 3759 | static ssize_t vbsf_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
|
---|
[85693] | 3760 | # elif RTLNX_VER_MIN(2, 6, 6)
|
---|
[77529] | 3761 | static ssize_t vbsf_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
|
---|
[85693] | 3762 | # elif RTLNX_VER_MIN(2, 5, 55)
|
---|
[77529] | 3763 | static int vbsf_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
|
---|
[85693] | 3764 | # elif RTLNX_VER_MIN(2, 5, 41)
|
---|
[77529] | 3765 | static int vbsf_direct_IO(int rw, struct file *file, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
|
---|
[85693] | 3766 | # elif RTLNX_VER_MIN(2, 5, 35)
|
---|
[77529] | 3767 | static int vbsf_direct_IO(int rw, struct inode *inode, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
|
---|
[85693] | 3768 | # elif RTLNX_VER_MIN(2, 5, 26)
|
---|
[77529] | 3769 | static int vbsf_direct_IO(int rw, struct inode *inode, char *buf, loff_t offset, size_t count)
|
---|
[77966] | 3770 | # elif LINUX_VERSION_CODE == KERNEL_VERSION(2, 4, 21) && defined(I_NEW) /* RHEL3 Frankenkernel. */
|
---|
| 3771 | static int vbsf_direct_IO(int rw, struct file *file, struct kiobuf *buf, unsigned long whatever1, int whatever2)
|
---|
| 3772 | # else
|
---|
| 3773 | static int vbsf_direct_IO(int rw, struct inode *inode, struct kiobuf *buf, unsigned long whatever1, int whatever2)
|
---|
| 3774 | # endif
|
---|
[77004] | 3775 | {
|
---|
[77526] | 3776 | TRACE();
|
---|
| 3777 | return -EINVAL;
|
---|
[77004] | 3778 | }
|
---|
[78135] | 3779 |
|
---|
[77966] | 3780 | #endif
|
---|
[77004] | 3781 |
|
---|
[77529] | 3782 | /**
|
---|
| 3783 | * Address space (for the page cache) operations for regular files.
|
---|
[77741] | 3784 | *
|
---|
| 3785 | * @todo the FsPerf touch/flush (mmap) test fails on 4.4.0 (ubuntu 16.04 lts).
|
---|
[77529] | 3786 | */
|
---|
| 3787 | struct address_space_operations vbsf_reg_aops = {
|
---|
[77873] | 3788 | .readpage = vbsf_readpage,
|
---|
| 3789 | .writepage = vbsf_writepage,
|
---|
[77526] | 3790 | /** @todo Need .writepages if we want msync performance... */
|
---|
[85698] | 3791 | #if RTLNX_VER_MIN(2,5,12)
|
---|
[77526] | 3792 | .set_page_dirty = __set_page_dirty_buffers,
|
---|
[77966] | 3793 | #endif
|
---|
[85698] | 3794 | #if RTLNX_VER_MIN(2,6,24)
|
---|
[77873] | 3795 | .write_begin = vbsf_write_begin,
|
---|
| 3796 | .write_end = simple_write_end,
|
---|
[85698] | 3797 | #elif RTLNX_VER_MIN(2,5,45)
|
---|
[77873] | 3798 | .prepare_write = simple_prepare_write,
|
---|
| 3799 | .commit_write = simple_commit_write,
|
---|
[77966] | 3800 | #endif
|
---|
[85698] | 3801 | #if RTLNX_VER_MIN(2,4,10)
|
---|
[77873] | 3802 | .direct_IO = vbsf_direct_IO,
|
---|
[77966] | 3803 | #endif
|
---|
[6054] | 3804 | };
|
---|
[76744] | 3805 |
|
---|