Bug 847097: patch diff (raw unified view)
src/VBox/Additions/linux/sharedfolders/regops.c (-385 / +5 lines)
--- a/src/VBox/Additions/linux/sharedfolders/regops.c
+++ b/src/VBox/Additions/linux/sharedfolders/regops.c
@@ -1,10 +1,9 @@
-/* $Id: regops.c $ */
+/* $Id$ */
 /** @file
  * vboxsf - VBox Linux Shared Folders VFS, regular file inode and file operations.
  */
-
 /*
- * Copyright (C) 2006-2020 Oracle Corporation
+ * Copyright (C) 2006-2022 Oracle Corporation
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -27,8 +26,6 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
-
-
 /*********************************************************************************************************************************
 *   Header Files                                                                                                                 *
 *********************************************************************************************************************************/
@@ -53,30 +50,23 @@
 # include <linux/swap.h> /* for mark_page_accessed */
 #endif
 #include <iprt/err.h>
-
 #if RTLNX_VER_MAX(2,6,18)
 # define SEEK_END 2
 #endif
-
 #if RTLNX_VER_MAX(3,16,0)
 # define iter_is_iovec(a_pIter) ( !((a_pIter)->type & ITER_KVEC) )
 #elif RTLNX_VER_MAX(3,19,0)
 # define iter_is_iovec(a_pIter) ( !((a_pIter)->type & (ITER_KVEC | ITER_BVEC)) )
 #endif
-
 #if RTLNX_VER_MAX(4,17,0)
 # define vm_fault_t int
 #endif
-
 #if RTLNX_VER_MAX(2,5,20)
 # define pgoff_t    unsigned long
 #endif
-
 #if RTLNX_VER_MAX(2,5,12)
 # define PageUptodate(a_pPage) Page_Uptodate(a_pPage)
 #endif
-
-
 /*********************************************************************************************************************************
 *   Defined Constants And Macros                                                                                                 *
 *********************************************************************************************************************************/
@@ -87,8 +77,6 @@
 #else
 # define VBSF_GET_ITER_TYPE(a_pIter) ((a_pIter)->type)
 #endif
-
-
 /*********************************************************************************************************************************
 *   Structures and Typedefs                                                                                                      *
 *********************************************************************************************************************************/
@@ -114,7 +102,6 @@ struct vbsf_iov_iter {
 # define ITER_KVEC 1
 # define iov_iter vbsf_iov_iter
 #endif
-
 #if RTLNX_VER_MIN(2,6,19)
 /** Used by vbsf_iter_lock_pages() to keep the first page of the next segment. */
 struct vbsf_iter_stash {
@@ -133,8 +120,6 @@ struct vbsf_iter_stash {
 #else
 # define VBSF_ITER_STASH_INITIALIZER    { NULL, 0, ~(size_t)0 }
 #endif
-
-
 /*********************************************************************************************************************************
 *   Internal Functions                                                                                                           *
 *********************************************************************************************************************************/
@@ -143,13 +128,10 @@ static void vbsf_unlock_user_pages(struc
 static void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
                                            uint8_t const *pbSrcBuf, struct page **papSrcPages,
                                            uint32_t offSrcPage, size_t cSrcPages);
-
-
 /*********************************************************************************************************************************
 *   Provide more recent uio.h functionality to older kernels.                                                                    *
 *********************************************************************************************************************************/
 #if RTLNX_VER_RANGE(2,6,19,  3,16,0)
-
 /**
  * Detects the vector type.
  */
@@ -171,8 +153,6 @@ static int vbsf_iov_iter_detect_type(str
     }
     return 0;
 }
-
-
 # undef  iov_iter_count
 # define iov_iter_count(a_pIter)                vbsf_iov_iter_count(a_pIter)
 static size_t vbsf_iov_iter_count(struct vbsf_iov_iter const *iter)
@@ -186,8 +166,6 @@ static size_t vbsf_iov_iter_count(struct
     }
     return cbRet - iter->iov_offset;
 }
-
-
 # undef  iov_iter_single_seg_count
 # define iov_iter_single_seg_count(a_pIter)     vbsf_iov_iter_single_seg_count(a_pIter)
 static size_t vbsf_iov_iter_single_seg_count(struct vbsf_iov_iter const *iter)
@@ -196,8 +174,6 @@ static size_t vbsf_iov_iter_single_seg_c
         return iter->iov->iov_len - iter->iov_offset;
     return 0;
 }
-
-
 # undef  iov_iter_advance
 # define iov_iter_advance(a_pIter, a_cbSkip)    vbsf_iov_iter_advance(a_pIter, a_cbSkip)
 static void vbsf_iov_iter_advance(struct vbsf_iov_iter *iter, size_t cbSkip)
@@ -226,8 +202,6 @@ static void vbsf_iov_iter_advance(struct
         }
     }
 }
-
-
 # undef  iov_iter_get_pages
 # define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \
     vbsf_iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0)
@@ -244,7 +218,6 @@ static ssize_t vbsf_iov_iter_get_pages(s
             size_t              cPages     = RT_MIN(cPagesLeft, cMaxPages);
             struct task_struct *pTask      = current;
             size_t              cPagesLocked;
-
             down_read(&pTask->mm->mmap_sem);
             cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, iter->v_write, 1 /*force*/, papPages, NULL);
             up_read(&pTask->mm->mmap_sem);
@@ -269,8 +242,6 @@ static ssize_t vbsf_iov_iter_get_pages(s
     AssertFailed();
     return 0;
 }
-
-
 # undef  iov_iter_truncate
 # define iov_iter_truncate(iter, cbNew)         vbsf_iov_iter_truncate(iter, cbNew)
 static void vbsf_iov_iter_truncate(struct vbsf_iov_iter *iter, size_t cbNew)
@@ -278,8 +249,6 @@ static void vbsf_iov_iter_truncate(struc
     /* we have no counter or stuff, so it's a no-op. */
     RT_NOREF(iter, cbNew);
 }
-
-
 # undef  iov_iter_revert
 # define iov_iter_revert(a_pIter, a_cbRewind) vbsf_iov_iter_revert(a_pIter, a_cbRewind)
 void vbsf_iov_iter_revert(struct vbsf_iov_iter *iter, size_t cbRewind)
@@ -293,15 +262,12 @@ void vbsf_iov_iter_revert(struct vbsf_io
         cbRewind -= iter->iov_offset;
         iter->iov_offset = 0;
     }
-
     while (cbRewind > 0) {
         struct iovec const *pIov  = --iter->iov;
         size_t const        cbSeg = pIov->iov_len;
         iter->nr_segs++;
-
         Assert((uintptr_t)pIov >= (uintptr_t)iter->iov_org);
         Assert(iter->nr_segs <= iter->nr_segs_org);
-
         if (cbRewind <= cbSeg) {
             iter->iov_offset = cbSeg - cbRewind;
             break;
@@ -309,10 +275,8 @@ void vbsf_iov_iter_revert(struct vbsf_io
         cbRewind -= cbSeg;
     }
 }
-
 #endif /* 2.6.19 <= linux < 3.16.0 */
 #if RTLNX_VER_RANGE(3,16,0,  3,16,35)
-
 /** This is for implementing cMaxPage on 3.16 which doesn't have it. */
 static ssize_t vbsf_iov_iter_get_pages_3_16(struct iov_iter *iter, struct page **papPages,
                                             size_t cbMax, unsigned cMaxPages, size_t *poffPg0)
@@ -329,10 +293,8 @@ static ssize_t vbsf_iov_iter_get_pages_3
 # undef  iov_iter_get_pages
 # define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \
     vbsf_iov_iter_get_pages_3_16(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0)
-
 #endif /* 3.16.0-3.16.34 */
 #if RTLNX_VER_RANGE(2,6,19,  3,18,0)
-
 static size_t copy_from_iter(uint8_t *pbDst, size_t cbToCopy, struct iov_iter *pSrcIter)
 {
     size_t const cbTotal = cbToCopy;
@@ -370,8 +332,6 @@ static size_t copy_from_iter(uint8_t *pb
     }
     return cbTotal - cbToCopy;
 }
-
-
 static size_t copy_to_iter(uint8_t const *pbSrc, size_t cbToCopy, struct iov_iter *pDstIter)
 {
     size_t const cbTotal = cbToCopy;
@@ -410,15 +370,10 @@ static size_t copy_to_iter(uint8_t const
     }
     return cbTotal - cbToCopy;
 }
-
 #endif /* 3.16.0 <= linux < 3.18.0 */
-
-
-
 /*********************************************************************************************************************************
 *   Handle management                                                                                                            *
 *********************************************************************************************************************************/
-
 /**
  * Called when an inode is released to unlink all handles that might impossibly
  * still be associated with it.
@@ -431,18 +386,14 @@ void vbsf_handle_drop_chain(struct vbsf_
     unsigned long     fSavedFlags;
     SFLOGFLOW(("vbsf_handle_drop_chain: %p\n", pInodeInfo));
     spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
-
     RTListForEachSafe(&pInodeInfo->HandleList, pCur, pNext, struct vbsf_handle, Entry) {
         AssertMsg(   (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
                   ==                 (VBSF_HANDLE_F_MAGIC      | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
         pCur->fFlags |= VBSF_HANDLE_F_ON_LIST;
         RTListNodeRemove(&pCur->Entry);
     }
-
     spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
 }
-
-
 /**
  * Locates a handle that matches all the flags in @a fFlags.
  *
@@ -457,7 +408,6 @@ struct vbsf_handle *vbsf_handle_find(str
     struct vbsf_handle *pCur;
     unsigned long     fSavedFlags;
     spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
-
     RTListForEach(&pInodeInfo->HandleList, pCur, struct vbsf_handle, Entry) {
         AssertMsg(   (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
                   ==                 (VBSF_HANDLE_F_MAGIC      | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
@@ -472,13 +422,10 @@ struct vbsf_handle *vbsf_handle_find(str
             ASMAtomicDecU32(&pCur->cRefs);
         }
     }
-
     spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
     SFLOGFLOW(("vbsf_handle_find: returns NULL!\n"));
     return NULL;
 }
-
-
 /**
  * Slow worker for vbsf_handle_release() that does the freeing.
  *
@@ -492,25 +439,19 @@ uint32_t vbsf_handle_release_slow(struct
 {
     int rc;
     unsigned long fSavedFlags;
-
     SFLOGFLOW(("vbsf_handle_release_slow: %p (%s)\n", pHandle, pszCaller));
-
     /*
      * Remove from the list.
      */
     spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
-
     AssertMsg((pHandle->fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC, ("%p %#x\n", pHandle, pHandle->fFlags));
     Assert(pHandle->pInodeInfo);
     Assert(pHandle->pInodeInfo && pHandle->pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
-
     if (pHandle->fFlags & VBSF_HANDLE_F_ON_LIST) {
         pHandle->fFlags &= ~VBSF_HANDLE_F_ON_LIST;
         RTListNodeRemove(&pHandle->Entry);
     }
-
     spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
-
     /*
      * Actually destroy it.
      */
@@ -522,8 +463,6 @@ uint32_t vbsf_handle_release_slow(struct
     kfree(pHandle);
     return 0;
 }
-
-
 /**
  * Appends a handle to a handle list.
  *
@@ -536,14 +475,11 @@ void vbsf_handle_append(struct vbsf_inod
     struct vbsf_handle *pCur;
 #endif
     unsigned long fSavedFlags;
-
     SFLOGFLOW(("vbsf_handle_append: %p (to %p)\n", pHandle, pInodeInfo));
     AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
               ("%p %#x\n", pHandle, pHandle->fFlags));
     Assert(pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
-
     spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
-
     AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
           ("%p %#x\n", pHandle, pHandle->fFlags));
 #ifdef VBOX_STRICT
@@ -554,19 +490,13 @@ void vbsf_handle_append(struct vbsf_inod
     }
     pHandle->pInodeInfo = pInodeInfo;
 #endif
-
     pHandle->fFlags |= VBSF_HANDLE_F_ON_LIST;
     RTListAppend(&pInodeInfo->HandleList, &pHandle->Entry);
-
     spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
 }
-
-
-
 /*********************************************************************************************************************************
 *   Misc                                                                                                                         *
 *********************************************************************************************************************************/
-
 #if RTLNX_VER_MAX(2,6,6)
 /** Any writable mappings? */
 DECLINLINE(bool) mapping_writably_mapped(struct address_space const *mapping)
@@ -578,8 +508,6 @@ DECLINLINE(bool) mapping_writably_mapped
 # endif
 }
 #endif
-
-
 #if RTLNX_VER_MAX(2,5,12)
 /** Missing in 2.4.x, so just stub it for now. */
 DECLINLINE(bool) PageWriteback(struct page const *page)
@@ -587,8 +515,6 @@ DECLINLINE(bool) PageWriteback(struct pa
     return false;
 }
 #endif
-
-
 /**
  * Helper for deciding wheter we should do a read via the page cache or not.
  *
@@ -609,15 +535,10 @@ DECLINLINE(bool) vbsf_should_use_cached_
         && mapping->nrpages > 0
         && mapping_writably_mapped(mapping);
 }
-
-
-
 /*********************************************************************************************************************************
 *   Pipe / splice stuff mainly for 2.6.17 >= linux < 2.6.31 (where no fallbacks were available)                                  *
 *********************************************************************************************************************************/
-
 #if RTLNX_VER_RANGE(2,6,17,  3,16,0)
-
 # if RTLNX_VER_MAX(2,6,30)
 #  define LOCK_PIPE(a_pPipe)   do { if ((a_pPipe)->inode) mutex_lock(&(a_pPipe)->inode->i_mutex); } while (0)
 #  define UNLOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_unlock(&(a_pPipe)->inode->i_mutex); } while (0)
625
#  define LOCK_PIPE(a_pPipe)   pipe_lock(a_pPipe)
546
#  define LOCK_PIPE(a_pPipe)   pipe_lock(a_pPipe)
626
#  define UNLOCK_PIPE(a_pPipe) pipe_unlock(a_pPipe)
547
#  define UNLOCK_PIPE(a_pPipe) pipe_unlock(a_pPipe)
627
# endif
548
# endif
628
629
630
/** Waits for the pipe buffer status to change. */
549
/** Waits for the pipe buffer status to change. */
631
static void vbsf_wait_pipe(struct pipe_inode_info *pPipe)
550
static void vbsf_wait_pipe(struct pipe_inode_info *pPipe)
632
{
551
{
@@ -637,14 +556,10 @@ static void vbsf_wait_pipe(struct pipe_i
     prepare_to_wait(&pPipe->wait, &WaitStuff, TASK_INTERRUPTIBLE);
 # endif
     UNLOCK_PIPE(pPipe);
-
     schedule();
-
     finish_wait(&pPipe->wait, &WaitStuff);
     LOCK_PIPE(pPipe);
 }
-
-
 /** Worker for vbsf_feed_pages_to_pipe that wakes up readers. */
 static void vbsf_wake_up_pipe(struct pipe_inode_info *pPipe, bool fReaders)
 {
@@ -656,18 +571,14 @@ static void vbsf_wake_up_pipe(struct pip
     else
         kill_fasync(&pPipe->fasync_writers, SIGIO, POLL_OUT);
 }
-
 #endif
 #if RTLNX_VER_RANGE(2,6,17,  2,6,31)
-
 /** Verify pipe buffer content (needed for page-cache to ensure idle page). */
 static int vbsf_pipe_buf_confirm(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
 {
     /*SFLOG3(("vbsf_pipe_buf_confirm: %p\n", pPipeBuf));*/
     return 0;
 }
-
-
 /** Maps the buffer page. */
 static void *vbsf_pipe_buf_map(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, int atomic)
 {
@@ -681,8 +592,6 @@ static void *vbsf_pipe_buf_map(struct pi
     /*SFLOG3(("vbsf_pipe_buf_map: %p -> %p\n", pPipeBuf, pvRet));*/
     return pvRet;
 }
-
-
 /** Unmaps the buffer page. */
 static void vbsf_pipe_buf_unmap(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, void *pvMapping)
 {
@@ -694,24 +603,18 @@ static void vbsf_pipe_buf_unmap(struct p
         kunmap_atomic(pvMapping, KM_USER0);
     }
 }
-
-
 /** Gets a reference to the page. */
 static void vbsf_pipe_buf_get(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
 {
     page_cache_get(pPipeBuf->page);
     /*SFLOG3(("vbsf_pipe_buf_get: %p (return count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
 }
-
-
 /** Release the buffer page (counter to vbsf_pipe_buf_get). */
 static void vbsf_pipe_buf_release(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
 {
     /*SFLOG3(("vbsf_pipe_buf_release: %p (incoming count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
     page_cache_release(pPipeBuf->page);
 }
-
-
 /** Attempt to steal the page.
  * @returns 0 success, 1 on failure.  */
 static int vbsf_pipe_buf_steal(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
@@ -724,8 +627,6 @@ static int vbsf_pipe_buf_steal(struct pi
     SFLOG3(("vbsf_pipe_buf_steal: %p -> 1\n", pPipeBuf));
     return 1;
 }
-
-
 /**
  * Pipe buffer operations for used by vbsf_feed_pages_to_pipe.
  */
@@ -742,8 +643,6 @@ static struct pipe_buf_operations vbsf_p
     .release   = vbsf_pipe_buf_release,
     .steal     = vbsf_pipe_buf_steal,
 };
-
-
 /**
  * Feeds the pages to the pipe.
  *
@@ -755,7 +654,6 @@ static ssize_t vbsf_feed_pages_to_pipe(s
     ssize_t cbRet       = 0;
     size_t  iPage       = 0;
     bool    fNeedWakeUp = false;
-
     LOCK_PIPE(pPipe);
     for (;;) {
         if (   pPipe->readers > 0
@@ -770,13 +668,11 @@ static ssize_t vbsf_feed_pages_to_pipe(s
             pPipeBuf->ops       = &vbsf_pipe_buf_ops;
             pPipeBuf->flags     = fFlags & SPLICE_F_GIFT ? PIPE_BUF_FLAG_GIFT : 0;
             pPipeBuf->page      = papPages[iPage];
-
             papPages[iPage++] = NULL;
             pPipe->nrbufs++;
             fNeedWakeUp |= pPipe->inode != NULL;
             offPg0 = 0;
             cbRet += cbThisPage;
-
             /* done? */
             cbActual -= cbThisPage;
             if (!cbActual)
@@ -807,14 +703,10 @@ static ssize_t vbsf_feed_pages_to_pipe(s
         }
     }
     UNLOCK_PIPE(pPipe);
-
     if (fNeedWakeUp)
         vbsf_wake_up_pipe(pPipe, true /*fReaders*/);
-
     return cbRet;
 }
-
-
 /**
  * For splicing from a file to a pipe.
  */
@@ -823,7 +715,6 @@ static ssize_t vbsf_splice_read(struct f
     struct inode           *inode      = VBSF_GET_F_DENTRY(file)->d_inode;
     struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
     ssize_t                 cbRet;
-
     SFLOGFLOW(("vbsf_splice_read: file=%p poffset=%p{%#RX64} pipe=%p len=%#zx flags=%#x\n", file, poffset, *poffset, pipe, len, flags));
     if (vbsf_should_use_cached_read(file, inode->i_mapping, pSuperInfo)) {
         cbRet = generic_file_splice_read(file, poffset, pipe, len, flags);
@@ -873,10 +764,8 @@ static ssize_t vbsf_splice_read(struct f
                     uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
                     AssertStmt(cbActual <= cbToRead, cbActual = cbToRead);
                     SFLOG2(("vbsf_splice_read: read -> %#x bytes @ %#RX64\n", cbActual, offFile));
-
                     VbglR0PhysHeapFree(pReq);
                     pReq = NULL;
-
                     /*
                      * Now, feed it to the pipe thingy.
                      * This will take ownership of the all pages no matter what happens.
@@ -890,7 +779,6 @@ static ssize_t vbsf_splice_read(struct f
                 }
                 i = cPages;
             }
-
             while (i-- > 0)
                 if (apPages[i])
                     __free_pages(apPages[i], 0);
@@ -903,10 +791,8 @@ static ssize_t vbsf_splice_read(struct f
     SFLOGFLOW(("vbsf_splice_read: returns %zd (%#zx), *poffset=%#RX64\n", cbRet, cbRet, *poffset));
     return cbRet;
 }
-
 #endif /* 2.6.17 <= LINUX_VERSION_CODE < 2.6.31 */
 #if RTLNX_VER_RANGE(2,6,17,  3,16,0)
-
 /**
  * For splicing from a pipe to a file.
  *
@@ -918,7 +804,6 @@ static ssize_t vbsf_splice_write(struct
     struct inode           *inode      = VBSF_GET_F_DENTRY(file)->d_inode;
     struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
     ssize_t                 cbRet;
-
     SFLOGFLOW(("vbsf_splice_write: pPipe=%p file=%p poffset=%p{%#RX64} len=%#zx flags=%#x\n", pPipe, file, poffset, *poffset, len, flags));
     /** @todo later if (false) {
         cbRet = generic_file_splice_write(pPipe, file, poffset, len, flags);
@@ -943,9 +828,7 @@ static ssize_t vbsf_splice_write(struct
             loff_t                offFile     = *poffset;
             bool                  fNeedWakeUp = false;
             cbRet = 0;
-
             LOCK_PIPE(pPipe);
-
             for (;;) {
                 unsigned cBufs = pPipe->nrbufs;
                 /*SFLOG2(("vbsf_splice_write: nrbufs=%#x curbuf=%#x\n", cBufs, pPipe->curbuf));*/
@@ -957,13 +840,10 @@ static ssize_t vbsf_splice_write(struct
                     struct pipe_buffer *pPipeBuf      = &pPipe->bufs[pPipe->curbuf];
                     uint32_t            cPagesToWrite = 1;
                     uint32_t            cbToWrite     = pPipeBuf->len;
-
                     Assert(pPipeBuf->offset < PAGE_SIZE);
                     Assert(pPipeBuf->offset + pPipeBuf->len <= PAGE_SIZE);
-
                     pReq->PgLst.offFirstPage = pPipeBuf->offset & PAGE_OFFSET;
                     pReq->PgLst.aPages[0]    = page_to_phys(pPipeBuf->page);
-
                     /* Add any adjacent page buffers: */
                     while (   cPagesToWrite < cBufs
                            && cPagesToWrite < cMaxPages
@@ -981,7 +861,6 @@ static ssize_t vbsf_splice_write(struct
                         cbToWrite     += pPipeBuf2->len;
                         cPagesToWrite += 1;
                     }
-
                     /* Check that we don't have signals pending before we issue the write, as
                        we'll only end up having to cancel the HGCM request 99% of the time: */
                     if (!signal_pending(current)) {
@@ -999,25 +878,19 @@ static ssize_t vbsf_splice_write(struct
                         uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
                         AssertStmt(cbActual <= cbToWrite, cbActual = cbToWrite);
                         SFLOG2(("vbsf_splice_write: write -> %#x bytes @ %#RX64\n", cbActual, offFile));
-
                         cbRet += cbActual;
-
                         while (cbActual > 0) {
                             uint32_t cbAdvance = RT_MIN(pPipeBuf->len, cbActual);
-
                             vbsf_reg_write_sync_page_cache(mapping, offFile, cbAdvance, NULL,
                                                            &pPipeBuf->page, pPipeBuf->offset, 1);
-
                             offFile          += cbAdvance;
                             cbActual         -= cbAdvance;
                             pPipeBuf->offset += cbAdvance;
                             pPipeBuf->len    -= cbAdvance;
-
                             if (!pPipeBuf->len) {
                                 struct pipe_buf_operations const *pOps = pPipeBuf->ops;
                                 pPipeBuf->ops = NULL;
                                 pOps->release(pPipe, pPipeBuf);
-
 # ifdef PIPE_BUFFERS
                                 pPipe->curbuf  = (pPipe->curbuf + 1) % PIPE_BUFFERS;
 # else
@@ -1025,7 +898,6 @@ static ssize_t vbsf_splice_write(struct
 # endif
                                 pPipe->nrbufs -= 1;
                                 pPipeBuf = &pPipe->bufs[pPipe->curbuf];
-
 # if RTLNX_VER_MAX(2,6,30)
                                 fNeedWakeUp |= pPipe->inode != NULL;
 # else
@@ -1036,7 +908,6 @@ static ssize_t vbsf_splice_write(struct
                                 break;
                             }
                         }
-
                         *poffset = offFile;
                     } else {
                         if (cbRet == 0)
@@ -1054,20 +925,17 @@ static ssize_t vbsf_splice_write(struct
                         SFLOGFLOW(("vbsf_splice_write: No buffers. No writers. The show is done!\n"));
                         break;
                     }
-
                     /* Quit if if we've written some and no writers waiting on the lock: */
                     if (cbRet > 0 && pPipe->waiting_writers == 0) {
                         SFLOGFLOW(("vbsf_splice_write: No waiting writers, returning what we've got.\n"));
                         break;
                     }
-
                     /* Quit with EAGAIN if non-blocking: */
                     if (flags & SPLICE_F_NONBLOCK) {
                         if (cbRet == 0)
                             cbRet = -EAGAIN;
                         break;
                     }
-
                     /* Quit if we've got pending signals: */
                     if (signal_pending(current)) {
                         if (cbRet == 0)
@@ -1075,7 +943,6 @@ static ssize_t vbsf_splice_write(struct
                         SFLOGFLOW(("vbsf_splice_write: pending signal! (%zd)\n", cbRet));
                         break;
                     }
-
                     /* Wake up writers before we start waiting: */
                     if (fNeedWakeUp) {
                         vbsf_wake_up_pipe(pPipe, false /*fReaders*/);
@@ -1084,12 +951,9 @@ static ssize_t vbsf_splice_write(struct
                     vbsf_wait_pipe(pPipe);
                 }
             } /* feed loop */
-
             if (fNeedWakeUp)
                 vbsf_wake_up_pipe(pPipe, false /*fReaders*/);
-
             UNLOCK_PIPE(pPipe);
-
             VbglR0PhysHeapFree(pReq);
         } else {
             cbRet = -ENOMEM;
@@ -1098,9 +962,7 @@ static ssize_t vbsf_splice_write(struct
     SFLOGFLOW(("vbsf_splice_write: returns %zd (%#zx), *poffset=%#RX64\n", cbRet, cbRet, *poffset));
     return cbRet;
 }
-
 #endif /* 2.6.17 <= LINUX_VERSION_CODE < 3.16.0 */
-
 #if RTLNX_VER_RANGE(2,5,30,  2,6,23)
 /**
  * Our own senfile implementation that does not go via the page cache like
@@ -1120,13 +982,11 @@ static ssize_t vbsf_reg_sendfile(struct
     SFLOGFLOW(("vbsf_reg_sendfile: pFile=%p poffFile=%p{%#RX64} cbToSend=%#zx pfnActor=%p pvUser=%p\n",
                pFile, poffFile, poffFile ? *poffFile : 0, cbToSend, pfnActor, pvUser));
     Assert(pSuperInfo);
-
     /*
      * Return immediately if asked to send nothing.
      */
     if (cbToSend == 0)
         return 0;
-
     /*
      * Like for vbsf_reg_read() and vbsf_reg_read_iter(), we allow going via
      * the page cache in some cases or configs.
1176
# endif
1036
# endif
1177
                RdDesc.written  = 0;
1037
                RdDesc.written  = 0;
1178
                RdDesc.error    = 0;
1038
                RdDesc.error    = 0;
1179
1180
                Assert(sf_r);
1039
                Assert(sf_r);
1181
                Assert((sf_r->Handle.fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC);
1040
                Assert((sf_r->Handle.fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC);
1182
1183
                while (cbToSend > 0) {
1041
                while (cbToSend > 0) {
1184
                    /*
1042
                    /*
1185
                     * Read another chunk.  For paranoid reasons, we keep data where the page cache
1043
                     * Read another chunk.  For paranoid reasons, we keep data where the page cache
@@ -1204,15 +1062,12 @@ static ssize_t vbsf_reg_sendfile(struct
                         bool const  fIsEof   = cbActual < cbToRead;
                         AssertStmt(cbActual <= cbToRead, cbActual = cbToRead);
                         SFLOG3(("vbsf_reg_sendfile: Read %#x bytes (offPg0=%#x), wanted %#x ...\n", cbActual, offPg0, cbToRead));
-
                         iPage = 0;
                         while (cbActual > 0) {
                             uint32_t const cbPage     = RT_MIN(cbActual, PAGE_SIZE - off);
                             int const      cbRetActor = pfnActor(&RdDesc, apPages[iPage], off, cbPage);
                             Assert(cbRetActor >= 0); /* Returns zero on failure, with RdDesc.error holding the status code. */
-
                             AssertMsg(iPage < cPages && iPage < cPagesToRead, ("iPage=%#x cPages=%#x cPagesToRead=%#x\n", iPage, cPages, cPagesToRead));
-
                             offFile += cbRetActor;
                             if ((uint32_t)cbRetActor == cbPage && RdDesc.count > 0) {
                                 cbActual -= cbPage;
1226
                            }
1081
                            }
1227
                            off = 0;
1082
                            off = 0;
1228
                        }
1083
                        }
1229
1230
                        /*
1084
                        /*
1231
                         * Are we done yet?
1085
                         * Are we done yet?
1232
                         */
1086
                         */
1233
                        if (RT_FAILURE_NP(vrc) || cbToSend == 0 || RdDesc.error != 0 || fIsEof) {
1087
                        if (RT_FAILURE_NP(vrc) || cbToSend == 0 || RdDesc.error != 0 || fIsEof) {
1234
                            break;
1088
                            break;
1235
                        }
1089
                        }
1236
1237
                        /*
1090
                        /*
1238
                         * Replace pages held by the actor.
1091
                         * Replace pages held by the actor.
1239
                         */
1092
                         */
@@ -1262,13 +1115,11 @@ static ssize_t vbsf_reg_sendfile(struct
                         break;
                     }
                 }
-
                 /*
                  * Free memory.
                  */
                 for (iPage = 0; iPage < cPages; iPage++)
                     vbsf_put_page(apPages[iPage]);
-
                 /*
                  * Set the return values.
                  */
Lines 1289-1300 static ssize_t vbsf_reg_sendfile(struct Link Here
1289
    return cbRet;
1140
    return cbRet;
1290
}
1141
}
1291
#endif /* 2.5.30 <= LINUX_VERSION_CODE < 2.6.23 */
1142
#endif /* 2.5.30 <= LINUX_VERSION_CODE < 2.6.23 */
1292
1293
1294
/*********************************************************************************************************************************
1143
/*********************************************************************************************************************************
1295
*   File operations on regular files                                                                                             *
1144
*   File operations on regular files                                                                                             *
1296
*********************************************************************************************************************************/
1145
*********************************************************************************************************************************/
1297
1298
/** Wrapper around put_page / page_cache_release.  */
1146
/** Wrapper around put_page / page_cache_release.  */
1299
DECLINLINE(void) vbsf_put_page(struct page *pPage)
1147
DECLINLINE(void) vbsf_put_page(struct page *pPage)
1300
{
1148
{
Lines 1304-1311 DECLINLINE(void) vbsf_put_page(struct pa Link Here
1304
    page_cache_release(pPage);
1152
    page_cache_release(pPage);
1305
#endif
1153
#endif
1306
}
1154
}
1307
1308
1309
/** Wrapper around get_page / page_cache_get.  */
1155
/** Wrapper around get_page / page_cache_get.  */
1310
DECLINLINE(void) vbsf_get_page(struct page *pPage)
1156
DECLINLINE(void) vbsf_get_page(struct page *pPage)
1311
{
1157
{
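    /* Editor's note: these two wrappers exist because page_cache_get() and
       page_cache_release() were folded into get_page()/put_page() around
       Linux 4.6; the RTLNX_VER checks in the wrapper bodies pick whichever
       API the running kernel provides. */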
Lines 1315-1329 DECLINLINE(void) vbsf_get_page(struct pa
    page_cache_get(pPage);
#endif
}


/** Companion to vbsf_lock_user_pages(). */
static void vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack)
{
    /* We don't mark kernel pages dirty: */
    if (fLockPgHack)
        fSetDirty = false;

    while (cPages-- > 0)
    {
        struct page *pPage = papPages[cPages];
Lines 1333-1340 static void vbsf_unlock_user_pages(struc
        vbsf_put_page(pPage);
    }
}


/**
 * Worker for vbsf_lock_user_pages_failed_check_kernel() and
 * vbsf_iter_lock_pages().
Lines 1345-1351 static int vbsf_lock_kernel_pages(uint8_
    uintptr_t const uPtrLast = (uPtrFrom & ~(uintptr_t)PAGE_OFFSET_MASK) + (cPages << PAGE_SHIFT) - 1;
    uint8_t        *pbPage   = (uint8_t *)uPtrLast;
    size_t          iPage    = cPages;

    /*
     * Touch the pages first (paranoia^2).
     */
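    /* Editor's note: the probe loop below appears to read one byte from each
       page so that any page not yet present faults in before we try to pin it. */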
Lines 1362-1368 static int vbsf_lock_kernel_pages(uint8_
            pbProbe += PAGE_SIZE;
        }
    }

    /*
     * Get the pages.
     * Note! Fixes here probably apply to rtR0MemObjNativeLockKernel as well.
Lines 1395-1402 static int vbsf_lock_kernel_pages(uint8_
    }
    return 0;
}


/**
 * Catches kernel_read() and kernel_write() calls and works around them.
 *
Lines 1434-1444 static int vbsf_lock_user_pages_failed_c
            return 0;
        }
    }

    return rcFailed;
}


/** Wrapper around get_user_pages. */
DECLINLINE(int) vbsf_lock_user_pages(uintptr_t uPtrFrom, size_t cPages, bool fWrite, struct page **papPages, bool *pfLockPgHack)
{
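    /* Editor's note: get_user_pages() and friends only resolve user-space
       addresses.  When a request originates from kernel_read()/kernel_write()
       the buffer is a kernel address and the locking below fails; that is the
       case vbsf_lock_user_pages_failed_check_kernel() above detects and routes
       to vbsf_lock_kernel_pages(). */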
Lines 1465-1485 DECLINLINE(int) vbsf_lock_user_pages(uin
    *pfLockPgHack = false;
    if (cPagesLocked == cPages)
        return 0;

    /*
     * It failed.
     */
    if (cPagesLocked < 0)
        return vbsf_lock_user_pages_failed_check_kernel(uPtrFrom, cPages, fWrite, (int)cPagesLocked, papPages, pfLockPgHack);

    vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/);

    /* We could use uPtrFrom + cPagesLocked to get the correct status here... */
    return -EFAULT;
}

#if RTLNX_VER_MAX(5,10,0) /* No regular .read/.write for 5.10, only .read_iter/.write_iter or in-kernel reads/writes fail. */

/**
 * Read function used when accessing files that are memory mapped.
 *
Lines 1493-1529 static ssize_t vbsf_reg_read_mapped(stru
    struct iov_iter iter;
    struct kiocb    kiocb;
    ssize_t         cbRet;

    init_sync_kiocb(&kiocb, file);
    kiocb.ki_pos = *off;
    iov_iter_init(&iter, READ, &iov, 1, size);

    cbRet = generic_file_read_iter(&kiocb, &iter);

    *off = kiocb.ki_pos;
    return cbRet;

# elif RTLNX_VER_MIN(2,6,19)
    struct iovec    iov = { .iov_base = buf, .iov_len = size };
    struct kiocb    kiocb;
    ssize_t         cbRet;

    init_sync_kiocb(&kiocb, file);
    kiocb.ki_pos = *off;

    cbRet = generic_file_aio_read(&kiocb, &iov, 1, *off);
    if (cbRet == -EIOCBQUEUED)
        cbRet = wait_on_sync_kiocb(&kiocb);

    *off = kiocb.ki_pos;
    return cbRet;

# else /* 2.6.18 or earlier: */
    return generic_file_read(file, buf, size, off);
# endif
}


/**
 * Fallback case of vbsf_reg_read() that locks the user buffers and lets the host
 * write directly to them.
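 * (Editor's note: "locking" here means pinning the user pages with
 * vbsf_lock_user_pages() so the host can transfer straight into them,
 * avoiding an intermediate bounce buffer.)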
Lines 1546-1552 static ssize_t vbsf_reg_read_locking(str
    size_t              cPages       = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
    size_t              cMaxPages    = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 1), cPages);
    bool                fLockPgHack;

    pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
    while (!pReq && cMaxPages > 4) {
        cMaxPages /= 2;
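        /* Editor's note: this loop keeps halving the page count until the
           physical heap can supply a request block of that size. */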
Lines 1570-1576 static ssize_t vbsf_reg_read_locking(str
                cPages  = cMaxPages;
                cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk;
            }

            rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, true /*fWrite*/, papPages, &fLockPgHack);
            if (rc == 0) {
                size_t iPage = cPages;
Lines 1580-1594 static ssize_t vbsf_reg_read_locking(str
                cbRet = rc;
                break;
            }

            /*
             * Issue the request and unlock the pages.
             */
            rc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);

            Assert(cPages <= cMaxPages);
            vbsf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/, fLockPgHack);

            if (RT_SUCCESS(rc)) {
                /*
                 * Success, advance position and buffer.
Lines 1599-1605 static ssize_t vbsf_reg_read_locking(str
                offFile += cbActual;
                buf      = (uint8_t *)buf + cbActual;
                size    -= cbActual;

                /*
                 * Are we done already?  If so commit the new file offset.
                 */
Lines 1637-1644 static ssize_t vbsf_reg_read_locking(str
    SFLOGFLOW(("vbsf_reg_read: returns %zd (%#zx), *off=%RX64 [lock]\n", cbRet, cbRet, *off));
    return cbRet;
}


/**
 * Read from a regular file.
 *
Lines 1654-1672 static ssize_t vbsf_reg_read(struct file
    struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
    struct vbsf_reg_info   *sf_r       = file->private_data;
    struct address_space   *mapping    = inode->i_mapping;

    SFLOGFLOW(("vbsf_reg_read: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off));

    if (!S_ISREG(inode->i_mode)) {
        LogFunc(("read from non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    /** @todo XXX Check read permission according to inode->i_mode! */

    if (!size)
        return 0;

    /*
     * If there is a mapping and O_DIRECT isn't in effect, we must at a
     * minimum heed dirty pages in the mapping and read from them.  For simplicity
Lines 1675-1681 static ssize_t vbsf_reg_read(struct file
     */
    if (vbsf_should_use_cached_read(file, mapping, pSuperInfo))
        return vbsf_reg_read_mapped(file, buf, size, off);

    /*
     * For small requests, try use an embedded buffer provided we get a heap block
     * that does not cross page boundaries (see host code).
Lines 1703-1709 static ssize_t vbsf_reg_read(struct file
            VbglR0PhysHeapFree(pReq);
        }
    }

# if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
    /*
     * For medium sized requests try use a bounce buffer.
Lines 1734-1745 static ssize_t vbsf_reg_read(struct file
        }
    }
# endif

    return vbsf_reg_read_locking(file, buf, size, off, pSuperInfo, sf_r);
}

#endif /* < 5.10.0 */

/**
 * Helper that synchronizes the page cache content with something we just wrote
 * to the host.
Lines 1800-1806 static void vbsf_reg_write_sync_page_cac
                unlock_page(pDstPage);
                vbsf_put_page(pDstPage);
            }

            /*
             * Advance.
             */
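            /* Editor's note: the loop above copies the freshly written bytes into
               any pages already present in the page cache, keeping cached content
               coherent with what the host just stored. */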
Lines 1825-1833 static void vbsf_reg_write_sync_page_cac
    }
    RT_NOREF(cSrcPages);
}

#if RTLNX_VER_MAX(5,10,0) /* No regular .read/.write for 5.10, only .read_iter/.write_iter or in-kernel reads/writes fail. */

/**
 * Fallback case of vbsf_reg_write() that locks the user buffers and lets the host
 * write directly to them.
Lines 1850-1856 static ssize_t vbsf_reg_write_locking(st
    size_t               cPages       = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
    size_t               cMaxPages    = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 1), cPages);
    bool                 fLockPgHack;

    pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
    while (!pReq && cMaxPages > 4) {
        cMaxPages /= 2;
Lines 1874-1880 static ssize_t vbsf_reg_write_locking(st
                cPages  = cMaxPages;
                cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk;
            }

            rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, false /*fWrite*/, papPages, &fLockPgHack);
            if (rc == 0) {
                size_t iPage = cPages;
Lines 1884-1890 static ssize_t vbsf_reg_write_locking(st
                cbRet = rc;
                break;
            }

            /*
             * Issue the request and unlock the pages.
             */
Lines 1896-1919 static ssize_t vbsf_reg_write_locking(st
                 */
                uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
                AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);

                vbsf_reg_write_sync_page_cache(inode->i_mapping, offFile, cbActual, NULL /*pbKrnlBuf*/,
                                               papPages, (uintptr_t)buf & PAGE_OFFSET_MASK, cPages);
                Assert(cPages <= cMaxPages);
                vbsf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/, fLockPgHack);

                cbRet   += cbActual;
                buf      = (uint8_t *)buf + cbActual;
                size    -= cbActual;

                offFile += cbActual;
                if ((file->f_flags & O_APPEND) && (g_fSfFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET))
                    offFile = pReq->Parms.off64Write.u.value64;
                if (offFile > i_size_read(inode))
                    i_size_write(inode, offFile);

                sf_i->force_restat = 1; /* mtime (and size) may have changed */

                /*
                 * Are we done already?  If so commit the new file offset.
                 */
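                /* Editor's note: the cached inode size is grown eagerly above,
                   while force_restat makes the next stat/getattr re-fetch size and
                   mtime from the host instead of trusting cached values. */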
Lines 1954-1961 static ssize_t vbsf_reg_write_locking(st
    SFLOGFLOW(("vbsf_reg_write: returns %zd (%#zx), *off=%RX64 [lock]\n", cbRet, cbRet, *off));
    return cbRet;
}


/**
 * Write to a regular file.
 *
Lines 1973-1999 static ssize_t vbsf_reg_write(struct fil
    struct vbsf_reg_info   *sf_r       = file->private_data;
    struct address_space   *mapping    = inode->i_mapping;
    loff_t                  pos;

    SFLOGFLOW(("vbsf_reg_write: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off));
    Assert(sf_i);
    Assert(pSuperInfo);
    Assert(sf_r);
    AssertReturn(S_ISREG(inode->i_mode), -EINVAL);

    pos = *off;
    if (file->f_flags & O_APPEND)
        pos = i_size_read(inode);

    /** @todo XXX Check write permission according to inode->i_mode! */

    if (!size) {
        if (file->f_flags & O_APPEND)  /** @todo check if this is the consensus behavior... */
            *off = pos;
        return 0;
    }

    /** @todo Implement the read-write caching mode. */

    /*
     * If there are active writable mappings, coordinate with any
     * pending writes via those.
Lines 2009-2015 static ssize_t vbsf_reg_write(struct fil
        /** @todo ...   */
# endif
    }

    /*
     * For small requests, try use an embedded buffer provided we get a heap block
     * that does not cross page boundaries (see host code).
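     * (Editor's note: an embedded buffer lives in the same physical heap block
     * as the request header itself; presumably the host addresses that block as
     * one physical page, hence the no-page-crossing requirement.)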
Lines 2040-2046 static ssize_t vbsf_reg_write(struct fil
                sf_i->force_restat = 1; /* mtime (and size) may have changed */
            } else
                cbRet = -EFAULT;

            VbglR0PhysHeapFree(pReq);
            SFLOGFLOW(("vbsf_reg_write: returns %zd (%#zx), *off=%RX64 [embed]\n", cbRet, cbRet, *off));
            return cbRet;
Lines 2048-2054 static ssize_t vbsf_reg_write(struct fil
        if (pReq)
            VbglR0PhysHeapFree(pReq);
    }

# if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
    /*
     * For medium sized requests try use a bounce buffer.
Lines 2089-2101 static ssize_t vbsf_reg_write(struct fil
        }
    }
# endif

    return vbsf_reg_write_locking(file, buf, size, off, pos, inode, sf_i, pSuperInfo, sf_r);
}

#endif /* < 5.10.0 */
#if RTLNX_VER_MIN(2,6,19)
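/* Editor's note: everything from here to the matching #endif implements the
   .read_iter/.write_iter paths; as the comments above state, on 5.10+ kernels
   these are the only regular read/write entry points this module provides. */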

/**
 * Companion to vbsf_iter_lock_pages().
 */
Lines 2104-2110 DECLINLINE(void) vbsf_iter_unlock_pages(
    /* We don't mark kernel pages dirty (KVECs, BVECs, PIPEs): */
    if (!iter_is_iovec(iter))
        fSetDirty = false;

    while (cPages-- > 0)
    {
        struct page *pPage = papPages[cPages];
Lines 2113-2120 DECLINLINE(void) vbsf_iter_unlock_pages(
        vbsf_put_page(pPage);
    }
}


/**
 * Locks up to @a cMaxPages from the I/O vector iterator, advancing the
 * iterator.
Lines 2136-2142 static int vbsf_iter_lock_pages(struct i
    size_t cPages   = 0;
    size_t offPage0 = 0;
    int    rc       = 0;

    Assert(iov_iter_count(iter) + pStash->cb > 0);
    if (!(VBSF_GET_ITER_TYPE(iter) & ITER_KVEC)) {
        /*
Lines 2170-2176 static int vbsf_iter_lock_pages(struct i
            pStash->Copy       = *iter;
# endif
        }

        /*
         * Get pages segment by segment.
         */
Lines 2223-2229 static int vbsf_iter_lock_pages(struct i
                        if (   cMaxPages == 0
                            || cbSegRet != PAGE_SIZE)
                            break;

                        /*
                         * Get the rest of the segment (if anything remaining).
                         */
Lines 2265-2271 static int vbsf_iter_lock_pages(struct i
            }
            Assert(cMaxPages > 0);
        } while (iov_iter_count(iter) > 0);

    } else {
        /*
         * The silly iov_iter_get_pages_alloc() function doesn't handle KVECs,
Lines 2282-2294 static int vbsf_iter_lock_pages(struct i
            uint8_t *pbBuf;
            size_t   offStart;
            size_t   cPgSeg;

            size_t   cbSeg = iov_iter_single_seg_count(iter);
            while (!cbSeg) {
                iov_iter_advance(iter, 0);
                cbSeg = iov_iter_single_seg_count(iter);
            }

# if RTLNX_VER_MIN(3,19,0)
            pbBuf    = iter->kvec->iov_base + iter->iov_offset;
# else
Lines 2299-2311 static int vbsf_iter_lock_pages(struct i
                offPage0 = offStart;
            else if (offStart)
                break;

            cPgSeg = RT_ALIGN_Z(cbSeg, PAGE_SIZE) >> PAGE_SHIFT;
            if (cPgSeg > cMaxPages) {
                cPgSeg = cMaxPages;
                cbSeg  = (cPgSeg << PAGE_SHIFT) - offStart;
            }

            rc = vbsf_lock_kernel_pages(pbBuf, fWrite, cPgSeg, &papPages[cPages]);
            if (rc == 0) {
                iov_iter_advance(iter, cbSeg);
Lines 2319-2325 static int vbsf_iter_lock_pages(struct i
                break;
        } while (iov_iter_count(iter) > 0);
    }

    /*
     * Clean up if we failed; set return values.
     */
Lines 2336-2343 static int vbsf_iter_lock_pages(struct i
    SFLOGFLOW(("vbsf_iter_lock_pages: returns %d - cPages=%#zx offPage0=%#zx cbChunk=%zx\n", rc, cPages, offPage0, cbChunk));
    return rc;
}


/**
 * Rewinds the I/O vector.
 */
Lines 2353-2359 static bool vbsf_iter_rewind(struct iov_
        pStash->cb    = 0;
        pStash->off   = 0;
    }

# if RTLNX_VER_MIN(4,11,0) || RTLNX_VER_MAX(3,16,0)
    iov_iter_revert(iter, cbToRewind + cbExtra);
    return true;
Lines 2362-2369 static bool vbsf_iter_rewind(struct iov_
    return false;
# endif
}


/**
 * Cleans up the page locking stash.
 */
Lines 2372-2379 DECLINLINE(void) vbsf_iter_cleanup_stash
    if (pStash->pPage)
        vbsf_iter_rewind(iter, pStash, 0, 0);
}


/**
 * Calculates the longest span of pages we could transfer to the host in a
 * single request.
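 * (Editor's note: the result caps cMaxPages in the read/write iterator workers
 * below, and with it the size of the page-list request allocated from the
 * physical heap.)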
Lines 2390-2404 static size_t vbsf_iter_max_span_of_page
        const struct iovec *pCurIov    = iter->iov;
        size_t              cLeft      = iter->nr_segs;
        size_t              cPagesSpan = 0;

        /* iovec and kvec are identical, except for the __user tagging of iov_base. */
        AssertCompileMembersSameSizeAndOffset(struct iovec, iov_base, struct kvec, iov_base);
        AssertCompileMembersSameSizeAndOffset(struct iovec, iov_len,  struct kvec, iov_len);
        AssertCompile(sizeof(struct iovec) == sizeof(struct kvec));

        cPages = 1;
        AssertReturn(cLeft > 0, cPages);

        /* Special case: segment offset. */
        if (iter->iov_offset > 0) {
            if (iter->iov_offset < pCurIov->iov_len) {
Lines 2412-2418 static size_t vbsf_iter_max_span_of_page
            pCurIov++;
            cLeft--;
        }

        /* Full segments. */
        while (cLeft-- > 0) {
            if (pCurIov->iov_len > 0) {
Lines 2455-2462 static size_t vbsf_iter_max_span_of_page
    SFLOGFLOW(("vbsf_iter_max_span_of_pages: returns %#zx\n", cPages));
    return cPages;
}


/**
 * Worker for vbsf_reg_read_iter() that deals with larger reads using page
 * locking.
Lines 2475-2481 static ssize_t vbsf_reg_read_iter_lockin
    ssize_t             cbRet        = 0;
    size_t              cMaxPages    = vbsf_iter_max_span_of_pages(iter);
    cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 2), cMaxPages);

    pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
    while (!pReq && cMaxPages > 4) {
        cMaxPages /= 2;
Lines 2484-2490 static ssize_t vbsf_reg_read_iter_lockin
    if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
        papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL);
    if (pReq && papPages) {

        /*
         * The read loop.
         */
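        /* Editor's note: the kmalloc above computes the element size as
           sizeof(sizeof(papPages[0])), i.e. sizeof(size_t) rather than the size
           of a page pointer; the two happen to be the same width on the usual
           32-bit and 64-bit targets, so the buffer still ends up large enough. */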
Lines 2509-2524 static ssize_t vbsf_reg_read_iter_lockin
                cbRet = rc;
                break;
            }

            /*
             * Issue the request and unlock the pages.
             */
            rc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, kio->ki_pos, cbChunk, cPages);
            SFLOGFLOW(("vbsf_reg_read_iter_locking: VbglR0SfHostReqReadPgLst -> %d (cbActual=%#x cbChunk=%#zx of %#zx cPages=%#zx offPage0=%#x\n",
                       rc, pReq->Parms.cb32Read.u.value32, cbChunk, cbToRead, cPages, offPage0));

            vbsf_iter_unlock_pages(iter, papPages, cPages, true /*fSetDirty*/);

            if (RT_SUCCESS(rc)) {
                /*
                 * Success, advance position and buffer.
Lines 2528-2534 static ssize_t vbsf_reg_read_iter_lockin
                cbRet       += cbActual;
                kio->ki_pos += cbActual;
                cbToRead    -= cbActual;

                /*
                 * Are we done already?
                 */
Lines 2562-2568 static ssize_t vbsf_reg_read_iter_lockin
                }
            }
        } while (cbToRead > 0);

        vbsf_iter_cleanup_stash(iter, &Stash);
    }
    else
Lines 2574-2581 static ssize_t vbsf_reg_read_iter_lockin
    SFLOGFLOW(("vbsf_reg_read_iter_locking: returns %#zx (%zd)\n", cbRet, cbRet));
    return cbRet;
}


/**
 * Read into I/O vector iterator.
 *
Lines 2596-2615 static ssize_t vbsf_reg_aio_read(struct
    size_t                  cbToRead   = iov_iter_count(iter);
    struct inode           *inode      = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
    struct address_space   *mapping    = inode->i_mapping;

    struct vbsf_reg_info   *sf_r       = kio->ki_filp->private_data;
    struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);

    SFLOGFLOW(("vbsf_reg_read_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n",
               inode, kio->ki_filp, cbToRead, kio->ki_pos, VBSF_GET_ITER_TYPE(iter) ));
    AssertReturn(S_ISREG(inode->i_mode), -EINVAL);

    /*
     * Do we have anything at all to do here?
     */
    if (!cbToRead)
        return 0;

    /*
     * If there is a mapping and O_DIRECT isn't in effect, we must at a
     * minimum heed dirty pages in the mapping and read from them.  For simplicity
Lines 2623-2629 static ssize_t vbsf_reg_aio_read(struct
        return generic_file_aio_read(kio, iov, cSegs, offFile);
# endif
    }

    /*
     * For now we reject async I/O requests.
     */
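    /* Editor's note: "async" means a kiocb carrying a completion callback
       (io_submit style); such requests get -EOPNOTSUPP below, so only
       synchronous iterator I/O is actually served. */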
Lines 2631-2637 static ssize_t vbsf_reg_aio_read(struct
        SFLOGFLOW(("vbsf_reg_read_iter: async I/O not yet supported\n")); /** @todo extend FsPerf with AIO tests. */
        return -EOPNOTSUPP;
    }

    /*
     * For small requests, try use an embedded buffer provided we get a heap block
     * that does not cross page boundaries (see host code).
Lines 2662-2675 static ssize_t vbsf_reg_aio_read(struct
            VbglR0PhysHeapFree(pReq);
        }
    }

    /*
     * Otherwise do the page locking thing.
     */
    return vbsf_reg_read_iter_locking(kio, iter, cbToRead, pSuperInfo, sf_r);
}


/**
 * Worker for vbsf_reg_write_iter() that deals with larger writes using page
 * locking.
Lines 2689-2695 static ssize_t vbsf_reg_write_iter_locki
    ssize_t              cbRet        = 0;
    size_t               cMaxPages    = vbsf_iter_max_span_of_pages(iter);
    cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 2), cMaxPages);

    pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
    while (!pReq && cMaxPages > 4) {
        cMaxPages /= 2;
Lines 2698-2704 static ssize_t vbsf_reg_write_iter_locki
    if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
        papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL);
    if (pReq && papPages) {

        /*
         * The write loop.
         */
Lines 2723-2729 static ssize_t vbsf_reg_write_iter_locki
                cbRet = rc;
                break;
            }

            /*
             * Issue the request and unlock the pages.
             */
Lines 2737-2758 static ssize_t vbsf_reg_write_iter_locki
                 */
                uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
                AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);

                vbsf_reg_write_sync_page_cache(mapping, offFile, cbActual, NULL /*pbSrcBuf*/, papPages, offPage0, cPages);
                vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/);

                cbRet      += cbActual;
                cbToWrite  -= cbActual;

                offFile    += cbActual;
                if (fAppend && (g_fSfFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET))
                    offFile = pReq->Parms.off64Write.u.value64;
                kio->ki_pos = offFile;
                if (offFile > i_size_read(inode))
                    i_size_write(inode, offFile);

                sf_i->force_restat = 1; /* mtime (and size) may have changed */

                /*
                 * Are we done already?
                 */
Lines 2788-2794 static ssize_t vbsf_reg_write_iter_locki
                }
            }
        } while (cbToWrite > 0);

        vbsf_iter_cleanup_stash(iter, &Stash);
    }
    else
Lines 2800-2807 static ssize_t vbsf_reg_write_iter_locki
    SFLOGFLOW(("vbsf_reg_write_iter_locking: returns %#zx (%zd)\n", cbRet, cbRet));
    return cbRet;
}


/**
 * Write from I/O vector iterator.
 *
Lines 2823-2829 static ssize_t vbsf_reg_aio_write(struct
    struct inode           *inode      = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
    struct vbsf_inode_info *sf_i       = VBSF_GET_INODE_INFO(inode);
    struct address_space   *mapping    = inode->i_mapping;

    struct vbsf_reg_info   *sf_r       = kio->ki_filp->private_data;
    struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
# if RTLNX_VER_MIN(3,16,0)
Lines 2834-2859 static ssize_t vbsf_reg_aio_write(struct
# else
    bool const              fAppend    = RT_BOOL(kio->ki_filp->f_flags & O_APPEND);
# endif


    SFLOGFLOW(("vbsf_reg_write_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n",
               inode, kio->ki_filp, cbToWrite, offFile, VBSF_GET_ITER_TYPE(iter) ));
    AssertReturn(S_ISREG(inode->i_mode), -EINVAL);

    /*
     * Enforce APPEND flag (more later).
     */
    if (fAppend)
        kio->ki_pos = offFile = i_size_read(inode);

    /*
     * Do we have anything at all to do here?
     */
    if (!cbToWrite)
        return 0;

    /** @todo Implement the read-write caching mode. */

    /*
     * For now we reject async I/O requests.
     */
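    /* Editor's note: for O_APPEND the position was pinned to the current EOF
       above; when the host advertises SHFL_FEATURE_WRITE_UPDATES_OFFSET, the
       offset the host returns after each write is used instead (see the write
       loop), keeping the file position authoritative on the host side. */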
Lines 2861-2867 static ssize_t vbsf_reg_aio_write(struct
        SFLOGFLOW(("vbsf_reg_write_iter: async I/O not yet supported\n")); /** @todo extend FsPerf with AIO tests. */
        return -EOPNOTSUPP;
    }

    /*
     * If there are active writable mappings, coordinate with any
     * pending writes via those.
Lines 2877-2883 static ssize_t vbsf_reg_aio_write(struct
        /** @todo ... */
# endif
    }

    /*
     * For small requests, try use an embedded buffer provided we get a heap block
     * that does not cross page boundaries (see host code).
Lines 2897-2910 static ssize_t vbsf_reg_aio_write(struct
                        AssertStmt(cbRet <= (ssize_t)cbToWrite, cbRet = cbToWrite);
                        vbsf_reg_write_sync_page_cache(mapping, offFile, (uint32_t)cbRet, pReq->abData,
                                                       NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/);

                        offFile += cbRet;
                        if (fAppend && (g_fSfFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET))
                            offFile = pReq->Parms.off64Write.u.value64;
                        kio->ki_pos = offFile;
                        if (offFile > i_size_read(inode))
                            i_size_write(inode, offFile);

# if RTLNX_VER_MIN(4,11,0)
                        if ((size_t)cbRet < cbToWrite)
                            iov_iter_revert(iter, cbToWrite - cbRet);
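                        /* Editor's note: the iterator was advanced when the data
                           was copied into the request's embedded buffer, so on a
                           short write the unconsumed tail is handed back to the
                           caller via iov_iter_revert(). */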
Lines 2921-2935 static ssize_t vbsf_reg_aio_write(struct
            VbglR0PhysHeapFree(pReq);
        }
    }

    /*
     * Otherwise do the page locking thing.
     */
    return vbsf_reg_write_iter_locking(kio, iter, cbToWrite, offFile, pSuperInfo, sf_r, inode, sf_i, mapping, fAppend);
}

#endif /* >= 2.6.19 */

/**
 * Used by vbsf_reg_open() and vbsf_inode_atomic_open() to
 *
Lines 2941-2947 static ssize_t vbsf_reg_aio_write(struct
uint32_t vbsf_linux_oflags_to_vbox(unsigned fLnxOpen, uint32_t *pfHandle, const char *pszCaller)
{
    uint32_t fVBoxFlags = SHFL_CF_ACCESS_DENYNONE;

    /*
     * Disposition.
     */
Lines 2963-2969 uint32_t vbsf_linux_oflags_to_vbox(unsig Link Here
2963
            fVBoxFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
2670
            fVBoxFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
2964
        }
2671
        }
2965
    }
2672
    }
2966
2967
    /*
2673
    /*
2968
     * Access.
2674
     * Access.
2969
     */
2675
     */
Lines 2972-2998 uint32_t vbsf_linux_oflags_to_vbox(unsig Link Here
2972
            fVBoxFlags |= SHFL_CF_ACCESS_READ;
2678
            fVBoxFlags |= SHFL_CF_ACCESS_READ;
2973
            *pfHandle  |= VBSF_HANDLE_F_READ;
2679
            *pfHandle  |= VBSF_HANDLE_F_READ;
2974
            break;
2680
            break;
2975
2976
        case O_WRONLY:
2681
        case O_WRONLY:
2977
            fVBoxFlags |= SHFL_CF_ACCESS_WRITE;
2682
            fVBoxFlags |= SHFL_CF_ACCESS_WRITE;
2978
            *pfHandle  |= VBSF_HANDLE_F_WRITE;
2683
            *pfHandle  |= VBSF_HANDLE_F_WRITE;
2979
            break;
2684
            break;
2980
2981
        case O_RDWR:
2685
        case O_RDWR:
2982
            fVBoxFlags |= SHFL_CF_ACCESS_READWRITE;
2686
            fVBoxFlags |= SHFL_CF_ACCESS_READWRITE;
2983
            *pfHandle  |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE;
2687
            *pfHandle  |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE;
2984
            break;
2688
            break;
2985
2986
        default:
2689
        default:
2987
            BUG();
2690
            BUG();
2988
    }
2691
    }
2989
2990
    if (fLnxOpen & O_APPEND) {
2692
    if (fLnxOpen & O_APPEND) {
2991
        Log(("%s: O_APPEND set\n", pszCaller));
2693
        Log(("%s: O_APPEND set\n", pszCaller));
2992
        fVBoxFlags |= SHFL_CF_ACCESS_APPEND;
2694
        fVBoxFlags |= SHFL_CF_ACCESS_APPEND;
2993
        *pfHandle  |= VBSF_HANDLE_F_APPEND;
2695
        *pfHandle  |= VBSF_HANDLE_F_APPEND;
2994
    }
2696
    }
2995
2996
    /*
2697
    /*
2997
     * Only directories?
2698
     * Only directories?
2998
     */
2699
     */
Lines 3000-3010 uint32_t vbsf_linux_oflags_to_vbox(unsig Link Here
3000
        Log(("%s: O_DIRECTORY set\n", pszCaller));
2701
        Log(("%s: O_DIRECTORY set\n", pszCaller));
3001
        fVBoxFlags |= SHFL_CF_DIRECTORY;
2702
        fVBoxFlags |= SHFL_CF_DIRECTORY;
3002
    }
2703
    }
3003
3004
    return fVBoxFlags;
2704
    return fVBoxFlags;
3005
}
2705
}
3006
3007
3008
/**
2706
/**
3009
 * Open a regular file.
2707
 * Open a regular file.
3010
 *
2708
 *
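
vbsf_linux_oflags_to_vbox() reduces the Linux O_* open flags to the SHFL_CF_* create flags understood by the shared-folders host service, and mirrors the access mode into the handle flags. A hedged usage sketch (the exact disposition bits depend on the O_CREAT/O_TRUNC handling elided above):

    /* Sketch: translating a typical open(2) flag combination. */
    uint32_t fHandle = 0;
    uint32_t fCreate = vbsf_linux_oflags_to_vbox(O_CREAT | O_TRUNC | O_RDWR,
                                                 &fHandle, "example-caller");
    /* fCreate now carries SHFL_CF_ACCESS_READWRITE | SHFL_CF_ACCESS_DENYNONE plus
       the disposition bits; fHandle carries VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE. */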
Lines 3020-3041 static int vbsf_reg_open(struct inode *i
     struct dentry          *dentry     = VBSF_GET_F_DENTRY(file);
     struct vbsf_reg_info   *sf_r;
     VBOXSFCREATEREQ        *pReq;
-
     SFLOGFLOW(("vbsf_reg_open: inode=%p file=%p flags=%#x %s\n", inode, file, file->f_flags, sf_i ? sf_i->path->String.ach : NULL));
     Assert(pSuperInfo);
     Assert(sf_i);
-
     sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
     if (!sf_r) {
         LogRelFunc(("could not allocate reg info\n"));
         return -ENOMEM;
     }
-
     RTListInit(&sf_r->Handle.Entry);
     sf_r->Handle.cRefs  = 1;
     sf_r->Handle.fFlags = VBSF_HANDLE_F_FILE | VBSF_HANDLE_F_MAGIC;
     sf_r->Handle.hHost  = SHFL_HANDLE_NIL;
-
     /* Already open? */
     if (sf_i->handle != SHFL_HANDLE_NIL) {
         /*

Lines 3047-3059 static int vbsf_reg_open(struct inode *i
         sf_r->Handle.hHost = sf_i->handle;
         sf_i->handle = SHFL_HANDLE_NIL;
         file->private_data = sf_r;
-
         sf_r->Handle.fFlags |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE; /** @todo fix  */
         vbsf_handle_append(sf_i, &sf_r->Handle);
         SFLOGFLOW(("vbsf_reg_open: returns 0 (#1) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost));
         return 0;
     }
-
     pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq) + sf_i->path->u16Size);
     if (!pReq) {
         kfree(sf_r);

Lines 3063-3076 static int vbsf_reg_open(struct inode *i
     memcpy(&pReq->StrPath, sf_i->path, SHFLSTRING_HEADER_SIZE + sf_i->path->u16Size);
     RT_ZERO(pReq->CreateParms);
     pReq->CreateParms.Handle = SHFL_HANDLE_NIL;
-
     /* We check the value of pReq->CreateParms.Handle afterwards to
      * find out if the call succeeded or failed, as the API does not seem
      * to cleanly distinguish error and informational messages.
      *
      * Furthermore, we must set pReq->CreateParms.Handle to SHFL_HANDLE_NIL
      * to make the shared folders host service use our fMode parameter */
-
     /* We ignore O_EXCL, as the Linux kernel seems to call create
        beforehand itself, so O_EXCL should always fail. */
     pReq->CreateParms.CreateFlags = vbsf_linux_oflags_to_vbox(file->f_flags & ~O_EXCL, &sf_r->Handle.fFlags, __FUNCTION__);

Lines 3084-3090 static int vbsf_reg_open(struct inode *i
         VbglR0PhysHeapFree(pReq);
         return -RTErrConvertToErrno(rc);
     }
-
     if (pReq->CreateParms.Handle != SHFL_HANDLE_NIL) {
         vbsf_dentry_chain_increase_ttl(dentry);
         vbsf_update_inode(inode, sf_i, &pReq->CreateParms.Info, pSuperInfo, false /*fInodeLocked*/, 0 /*fSetAttrs*/);

Lines 3111-3117 static int vbsf_reg_open(struct inode *i
                 break;
         }
     }
-
     sf_r->Handle.hHost = pReq->CreateParms.Handle;
     file->private_data = sf_r;
     vbsf_handle_append(sf_i, &sf_r->Handle);

Lines 3119-3126 static int vbsf_reg_open(struct inode *i
     SFLOGFLOW(("vbsf_reg_open: returns 0 (#2) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost));
     return rc_linux;
 }
-
-
 /**
  * Close a regular file.
  *
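
vbsf_reg_open() also illustrates the request life cycle used throughout this file: allocate a physically contiguous request from the guest heap, fill it in, do the host round trip, translate the IPRT status to a negative errno, and free the request. Condensed to its skeleton (error paths trimmed; VbglR0SfHostReqCreate() per its use in the elided lines):

    /* Sketch: allocate/issue/translate/free around one host request. */
    VBOXSFCREATEREQ *pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq) + cbPath);
    if (!pReq)
        return -ENOMEM;
    /* ... fill in pReq->StrPath and pReq->CreateParms ... */
    rc = VbglR0SfHostReqCreate(pSuperInfo->map.root, pReq);     /* host round trip */
    if (RT_FAILURE(rc)) {
        VbglR0PhysHeapFree(pReq);
        return -RTErrConvertToErrno(rc);                        /* IPRT -> errno */
    }
    /* ... consume pReq->CreateParms.Handle and .Info ... */
    VbglR0PhysHeapFree(pReq);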
Lines 3132-3144 static int vbsf_reg_release(struct inode
 {
     struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
     struct vbsf_reg_info   *sf_r = file->private_data;
-
     SFLOGFLOW(("vbsf_reg_release: inode=%p file=%p\n", inode, file));
     if (sf_r) {
         struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
         struct address_space   *mapping    = inode->i_mapping;
         Assert(pSuperInfo);
-
         /* If we're closing the last handle for this inode, make sure we flush
            the mapping or we'll end up in vbsf_writepage without a handle. */
         if (   mapping

Lines 3152-3168 static int vbsf_reg_release(struct inode
 #endif
                 filemap_fdatawait(inode->i_mapping);
         }
-
         /* Release sf_r, closing the handle if we're the last user. */
         file->private_data = NULL;
         vbsf_handle_release(&sf_r->Handle, pSuperInfo, "vbsf_reg_release");
-
         sf_i->handle = SHFL_HANDLE_NIL;
     }
     return 0;
 }
-
-
 /**
  * Wrapper around generic/default seek function that ensures that we've got
  * the up-to-date file size when doing anything relative to EOF.

Lines 3175-3181 static int vbsf_reg_release(struct inode
 static loff_t vbsf_reg_llseek(struct file *file, loff_t off, int whence)
 {
     SFLOGFLOW(("vbsf_reg_llseek: file=%p off=%lld whence=%d\n", file, off, whence));
-
     switch (whence) {
 #ifdef SEEK_HOLE
         case SEEK_HOLE:

Lines 3190-3204 static loff_t vbsf_reg_llseek(struct fil
             return rc;
         }
     }
-
 #if RTLNX_VER_MIN(2,4,8)
     return generic_file_llseek(file, off, whence);
 #else
     return default_llseek(file, off, whence);
 #endif
 }
-
-
 /**
  * Flush region of file - chiefly mmap/msync.
  *
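
The llseek wrapper exists because SEEK_END (and SEEK_HOLE/SEEK_DATA) must be measured against the host's current file size, which the host or another guest may have changed behind the guest page cache. A minimal sketch of the refresh-then-delegate shape; example_refresh_size() is a hypothetical stand-in for the revalidation the elided lines perform:

    /* Sketch: refresh the cached size before EOF-relative seeks. */
    static loff_t example_llseek(struct file *file, loff_t off, int whence)
    {
        if (whence == SEEK_END) {
            int rc = example_refresh_size(file);   /* hypothetical stat refresh */
            if (rc)
                return rc;
        }
        return generic_file_llseek(file, off, whence); /* >= 2.4.8 per the hunk */
    }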
Lines 3231-3239 static int vbsf_reg_fsync(struct file *f
     int rc;
     struct inode *inode = dentry->d_inode;
     AssertReturn(inode, -EINVAL);
-
     /** @todo What about file_fsync()? (<= 2.5.11) */
-
 #  if RTLNX_VER_MIN(2,5,12)
     rc = sync_mapping_buffers(inode->i_mapping);
     if (   rc == 0

Lines 3259-3272 static int vbsf_reg_fsync(struct file *f
     if (rc == 0 && datasync)
         rc = fsync_inode_data_buffers(inode);
 #   endif
-
 #  endif /* < 2.5.12 */
     return rc;
 # endif
 }
 #endif /* < 2.6.35 */
-
-
 #if RTLNX_VER_MIN(4,5,0)
 /**
  * Copy a datablock from one file to another on the host side.
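
From userspace this host-side copy is reached through plain copy_file_range(2); when the host cannot do it, the kernel reports -EOPNOTSUPP and the caller falls back to an ordinary read/write loop. An illustrative (not VirtualBox-specific) example, with made-up paths:

    /* Sketch: userspace driving the host-side copy on a shared folder. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    int copy_on_share(void)
    {
        int fdIn  = open("/mnt/shared/src.bin", O_RDONLY);
        int fdOut = open("/mnt/shared/dst.bin", O_WRONLY | O_CREAT, 0644);
        ssize_t cb = copy_file_range(fdIn, NULL, fdOut, NULL, 1 << 20, 0);
        close(fdIn);
        close(fdOut);
        return cb < 0 ? -1 : 0;
    }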
Lines 3285-3291 static ssize_t vbsf_reg_copy_file_range(
         struct vbsf_super_info *pSuperInfoDst = VBSF_GET_SUPER_INFO(pInodeDst->i_sb);
         struct vbsf_reg_info   *pFileInfoDst  = (struct vbsf_reg_info *)pFileDst->private_data;
         VBOXSFCOPYFILEPARTREQ  *pReq;
-
         /*
          * Some extra validation.
          */

Lines 3293-3304 static ssize_t vbsf_reg_copy_file_range(
         Assert(pInodeInfoSrc->u32Magic == SF_INODE_INFO_MAGIC);
         AssertPtrReturn(pInodeInfoDst, -EOPNOTSUPP);
         Assert(pInodeInfoDst->u32Magic == SF_INODE_INFO_MAGIC);
-
 # if RTLNX_VER_MAX(4,11,0)
         if (!S_ISREG(pInodeSrc->i_mode) || !S_ISREG(pInodeDst->i_mode))
             return S_ISDIR(pInodeSrc->i_mode) || S_ISDIR(pInodeDst->i_mode) ? -EISDIR : -EINVAL;
 # endif
-
         /*
          * Allocate the request and issue it.
          */

Lines 3313-3319 static ssize_t vbsf_reg_copy_file_range(
                 cbRet = -EOPNOTSUPP;
             else
                 cbRet = -RTErrConvertToErrno(vrc);
-
             VbglR0PhysHeapFree(pReq);
         } else
             cbRet = -ENOMEM;
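
The error funnel above deserves a note: the elided check maps a host that lacks the copy primitive (VERR_NOT_IMPLEMENTED, by my reading of the truncated condition) to -EOPNOTSUPP so the VFS can fall back to a generic copy, and routes every other IPRT status through RTErrConvertToErrno(). Restated, with the success field name assumed:

    /* Sketch: folding IPRT statuses into the errno space the VFS expects. */
    if (RT_SUCCESS(vrc))
        cbRet = (ssize_t)pReq->Parms.cb64ToCopy.u.value64;  /* assumed field name */
    else if (vrc == VERR_NOT_IMPLEMENTED)
        cbRet = -EOPNOTSUPP;                 /* old host: let the VFS fall back */
    else
        cbRet = -RTErrConvertToErrno(vrc);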
Lines 3324-3342 static ssize_t vbsf_reg_copy_file_range(
     return cbRet;
 }
 #endif /* > 4.5 */
-
-
 #ifdef SFLOG_ENABLED
 /*
  * This is just for logging page faults and such.
  */
-
 /** Pointer to the ops generic_file_mmap returns the first time it's called. */
 static struct vm_operations_struct const *g_pGenericFileVmOps = NULL;
 /** Merge of g_LoggingVmOpsTemplate and g_pGenericFileVmOps. */
 static struct vm_operations_struct        g_LoggingVmOps;
-
-
 /* Generic page fault callback: */
 # if RTLNX_VER_MIN(4,11,0)
 static vm_fault_t vbsf_vmlog_fault(struct vm_fault *vmf)

Lines 3361-3368 static int vbsf_vmlog_fault(struct vm_ar
     return rc;
 }
 # endif
-
-
 /* Special/generic page fault handler: */
 # if RTLNX_VER_MIN(2,6,26)
 # elif RTLNX_VER_MIN(2,6,1)

Lines 3384-3391 static struct page *vbsf_vmlog_nopage(st
     return page;
 }
 # endif /* < 2.6.26 */
-
-
 /* Special page fault callback for making something writable: */
 # if RTLNX_VER_MIN(4,11,0)
 static vm_fault_t vbsf_vmlog_page_mkwrite(struct vm_fault *vmf)

Lines 3419-3426 static int vbsf_vmlog_page_mkwrite(struc
     return rc;
 }
 # endif
-
-
 /* Special page fault callback for mapping pages: */
 # if RTLNX_VER_MIN(5,12,0)
 static vm_fault_t vbsf_vmlog_map_pages(struct vm_fault *vmf, pgoff_t start, pgoff_t end)

Lines 3453-3460 static void vbsf_vmlog_map_pages(struct
     SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
 }
 # endif
-
-
 /** Overload template. */
 static struct vm_operations_struct const g_LoggingVmOpsTemplate = {
 # if RTLNX_VER_MIN(2,6,23)

Lines 3470-3476 static struct vm_operations_struct const
     .map_pages = vbsf_vmlog_map_pages,
 # endif
 };
-
 /** file_operations::mmap wrapper for logging purposes. */
 extern int vbsf_reg_mmap(struct file *file, struct vm_area_struct *vma)
 {

Lines 3500-3509 extern int vbsf_reg_mmap(struct file *fi
     SFLOGFLOW(("vbsf_reg_mmap: returns %d\n", rc));
     return rc;
 }
-
 #endif /* SFLOG_ENABLED */
-
-
 /**
  * File operations for regular files.
  *
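
The SFLOG machinery above works by letting generic_file_mmap() install its usual vm_operations_struct, then cloning that table once into g_LoggingVmOps with the logging callbacks spliced in, and finally pointing the VMA at the merged table. A hedged sketch of the merge step (first-call logic inferred from the declarations above; locking omitted):

    /* Sketch: wrap the generic vm_ops with logging variants on first mmap. */
    int rc = generic_file_mmap(file, vma);
    if (rc == 0 && vma->vm_ops != &g_LoggingVmOps) {
        if (!g_pGenericFileVmOps) {
            g_pGenericFileVmOps = vma->vm_ops;            /* remember the original   */
            g_LoggingVmOps      = g_LoggingVmOpsTemplate; /* start from our template */
            if (!g_LoggingVmOps.fault)                    /* inherit callbacks the   */
                g_LoggingVmOps.fault = g_pGenericFileVmOps->fault; /* template skips */
        }
        vma->vm_ops = &g_LoggingVmOps;                    /* redirect this mapping   */
    }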
Lines 3555-3562 struct file_operations vbsf_reg_fops = {
     .copy_file_range = vbsf_reg_copy_file_range,
 #endif
 };
-
-
 /**
  * Inode operations for regular files.
  */
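
These tables are what ties the rest of the module to a regular file's inode: the inode setup path points i_op, i_fop and the mapping's a_ops at them. A minimal sketch of that wiring; vbsf_init_reg_inode() is a hypothetical name, and vbsf_reg_aops follows the truncated header of the address-space table further down:

    /* Sketch: hooking a freshly created regular-file inode up to these tables. */
    static void vbsf_init_reg_inode(struct inode *inode)
    {
        inode->i_op             = &vbsf_reg_iops;  /* setattr & friends        */
        inode->i_fop            = &vbsf_reg_fops;  /* open/read/write/mmap/... */
        inode->i_mapping->a_ops = &vbsf_reg_aops;  /* page cache callbacks     */
    }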
Lines 3568-3580 struct inode_operations vbsf_reg_iops =
 #endif
     .setattr    = vbsf_inode_setattr,
 };
-
-
-
 /*********************************************************************************************************************************
 *   Address Space Operations on Regular Files (for mmap, sendfile, direct I/O)                                                   *
 *********************************************************************************************************************************/
-
 /**
  * Used to read the content of a page into the page cache.
  *

Lines 3585-3599 static int vbsf_readpage(struct file *fi
 {
     struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
     int           err;
-
     SFLOGFLOW(("vbsf_readpage: inode=%p file=%p page=%p off=%#llx\n", inode, file, page, (uint64_t)page->index << PAGE_SHIFT));
     Assert(PageLocked(page));
-
     if (PageUptodate(page)) {
         unlock_page(page);
         return 0;
     }
-
     if (!is_bad_inode(inode)) {
         VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
         if (pReq) {

Lines 3601-3607 static int vbsf_readpage(struct file *fi
             struct vbsf_reg_info   *sf_r       = file->private_data;
             uint32_t                cbRead;
             int                     vrc;
-
             pReq->PgLst.offFirstPage = 0;
             pReq->PgLst.aPages[0]    = page_to_phys(page);
             vrc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root,

Lines 3610-3620 static int vbsf_readpage(struct file *fi
                                            (uint64_t)page->index << PAGE_SHIFT,
                                            PAGE_SIZE,
                                            1 /*cPages*/);
-
             cbRead = pReq->Parms.cb32Read.u.value32;
             AssertStmt(cbRead <= PAGE_SIZE, cbRead = PAGE_SIZE);
             VbglR0PhysHeapFree(pReq);
-
             if (RT_SUCCESS(vrc)) {
                 if (cbRead == PAGE_SIZE) {
                     /* likely */

Lines 3624-3630 static int vbsf_readpage(struct file *fi
                     kunmap(page);
                     /** @todo truncate the inode file size? */
                 }
-
                 flush_dcache_page(page);
                 SetPageUptodate(page);
                 unlock_page(page);

Lines 3639-3646 static int vbsf_readpage(struct file *fi
     unlock_page(page);
     return err;
 }
-
-
 /**
  * Used to write out the content of a dirty page cache page to the host file.
  *
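
vbsf_readpage() reads exactly one page by handing the host the page's physical address. A short read means the page straddles EOF, so the elided branch maps the page and zero-fills the tail before publishing it; reconstructed under that assumption:

    /* Sketch: zero the part of the page the host could not fill (past EOF). */
    if (cbRead < PAGE_SIZE) {
        uint8_t *pbPage = (uint8_t *)kmap(page);   /* temporary kernel mapping */
        memset(&pbPage[cbRead], 0, PAGE_SIZE - cbRead);
        kunmap(page);
    }
    flush_dcache_page(page);
    SetPageUptodate(page);
    unlock_page(page);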
Lines 3658-3667 static int vbsf_writepage(struct page *p
     struct vbsf_inode_info *sf_i    = VBSF_GET_INODE_INFO(inode);
     struct vbsf_handle     *pHandle = vbsf_handle_find(sf_i, VBSF_HANDLE_F_WRITE, VBSF_HANDLE_F_APPEND);
     int                     err;
-
     SFLOGFLOW(("vbsf_writepage: inode=%p page=%p off=%#llx pHandle=%p (%#llx)\n",
                inode, page, (uint64_t)page->index << PAGE_SHIFT, pHandle, pHandle ? pHandle->hHost : 0));
-
     if (pHandle) {
         struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
         VBOXSFWRITEPGLSTREQ    *pReq       = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));

Lines 3671-3677 static int vbsf_writepage(struct page *p
             uint32_t const cbToWrite       = page->index != (cbFile >> PAGE_SHIFT) ? PAGE_SIZE
                                            : (uint32_t)cbFile & (uint32_t)PAGE_OFFSET_MASK;
             int            vrc;
-
             pReq->PgLst.offFirstPage = 0;
             pReq->PgLst.aPages[0]    = page_to_phys(page);
             vrc = VbglR0SfHostReqWritePgLst(pSuperInfo->map.root,

Lines 3685-3691 static int vbsf_writepage(struct page *p
                           ("%#x vs %#x\n", pReq->Parms.cb32Write, cbToWrite),
                           vrc = VERR_WRITE_ERROR);
             VbglR0PhysHeapFree(pReq);
-
             if (RT_SUCCESS(vrc)) {
                 /* Update the inode if we've extended the file. */
                 /** @todo is this necessary given the cbToWrite calc above? */

Lines 3693-3709 static int vbsf_writepage(struct page *p
                 if (   offEndOfWrite > cbFile
                     && offEndOfWrite > i_size_read(inode))
                     i_size_write(inode, offEndOfWrite);
-
                 /* Update and unlock the page. */
                 if (PageError(page))
                     ClearPageError(page);
                 SetPageUptodate(page);
                 unlock_page(page);
-
                 vbsf_handle_release(pHandle, pSuperInfo, "vbsf_writepage");
                 return 0;
             }
-
             /*
              * We failed.
              */

Lines 3722-3729 static int vbsf_writepage(struct page *p
     unlock_page(page);
     return err;
 }
-
-
 #if RTLNX_VER_MIN(2,6,24)
 /**
  * Called when writing thru the page cache (which we shouldn't be doing).
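
The cbToWrite computation is the heart of vbsf_writepage(): every page except the one containing EOF is sent in full, and the EOF page is trimmed with a mask. With PAGE_SIZE = 4096 and cbFile = 10000, the EOF page is index 10000 >> 12 = 2 and gets 10000 & 4095 = 1808 bytes, while pages 0 and 1 get the full 4096. Restated (assuming PAGE_OFFSET_MASK is PAGE_SIZE - 1):

    /* Sketch: how many bytes of a dirty page are sent to the host. */
    static uint32_t bytes_to_write(uint64_t cbFile, pgoff_t idxPage)
    {
        if (idxPage != (cbFile >> PAGE_SHIFT))
            return PAGE_SIZE;                      /* interior page: all of it  */
        return (uint32_t)cbFile & (PAGE_SIZE - 1); /* EOF page: the used prefix */
    }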
Lines 3748-3754 int vbsf_write_begin(struct file,
     return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
 }
 #endif /* KERNEL_VERSION >= 2.6.24 */
-
 #if RTLNX_VER_MIN(5,14,0)
 /**
  * Companion to vbsf_write_begin (i.e. shouldn't be called).

Lines 3771-3784 static int vbsf_write_end(struct file *f
     return -ENOTSUPP;
 }
 #endif /* KERNEL_VERSION >= 5.14.0 */
-
-
 #if RTLNX_VER_MIN(2,4,10)
-
 # ifdef VBOX_UEK
 #  undef iov_iter /* HACK ALERT! Don't put anything needing vbsf_iov_iter after this fun! */
 # endif
-
 /**
  * This is needed to make open accept O_DIRECT as well as dealing with direct
  * I/O requests if we don't intercept them earlier.

Lines 3811-3819 static int vbsf_direct_IO(int rw, struct
     TRACE();
     return -EINVAL;
 }
-
 #endif
-
 /**
  * Address space (for the page cache) operations for regular files.
  *

Lines 3823-3829 struct address_space_operations vbsf_reg
     .readpage       = vbsf_readpage,
     .writepage      = vbsf_writepage,
     /** @todo Need .writepages if we want msync performance...  */
-#if RTLNX_VER_MIN(2,5,12)
+#if RTLNX_VER_MIN(5,18,0)
+    .dirty_folio = filemap_dirty_folio,
+#elif RTLNX_VER_MIN(2,5,12)
     .set_page_dirty = __set_page_dirty_buffers,
 #endif
 #if RTLNX_VER_MIN(5,14,0)
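
This hunk is the substantive change on this page: Linux 5.18 removed the ->set_page_dirty address-space callback in favour of ->dirty_folio, so the table installs filemap_dirty_folio() on 5.18+ and keeps __set_page_dirty_buffers() for older kernels. The gating pattern in isolation (surrounding entries elided):

    /* Sketch: one aops table serving both sides of the 5.18 folio transition. */
    struct address_space_operations example_aops = {
        .readpage  = vbsf_readpage,
        .writepage = vbsf_writepage,
    #if RTLNX_VER_MIN(5,18,0)
        .dirty_folio    = filemap_dirty_folio,      /* folio API, 5.18 and later */
    #elif RTLNX_VER_MIN(2,5,12)
        .set_page_dirty = __set_page_dirty_buffers, /* legacy page-based hook    */
    #endif
    };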
Lines 3840-3843 struct address_space_operations vbsf_reg
     .direct_IO      = vbsf_direct_IO,
 #endif
 };
-
