Gentoo's Bugzilla – Attachment 780752 Details for Bug 847097
app-emulation/virtualbox-guest-additions-6.1.34 fails to build with 5.18.0: 'struct address_space_operations' has no member named 'set_page_dirty'
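For context (this note is not part of the attachment): Linux 5.18 converted the dirty-page hook in struct address_space_operations to folios, so the member that the vboxsf shared-folder driver assigns no longer exists. Roughly, the relevant member changed as sketched below; check include/linux/fs.h of the kernel in question for the exact declarations.

    /* Before 5.18 (what vboxsf 6.1.34 expects to assign): */
    int  (*set_page_dirty)(struct page *page);

    /* From 5.18 on (what the backported patch switches to): */
    bool (*dirty_folio)(struct address_space *mapping, struct folio *folio);

The guest-additions build fails at this assignment until the driver is taught to use the new hook, which is what the attached backport does.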
Description: backported virtualbox patch for vbox 6.1.34 and linux 5.18
Filename:    virtualbox-6.1.34-linux-5.18.patch
MIME Type:   text/plain
Creator:     Mike Pagano
Created:     2022-05-25 14:36:23 UTC
Size:        71.53 KB
Flags:       patch, obsolete
>--- a/src/VBox/Additions/linux/sharedfolders/regops.c 2022-05-25 09:35:31.510921282 -0400 >+++ b/src/VBox/Additions/linux/sharedfolders/regops.c 2022-05-25 09:37:18.664142178 -0400 >@@ -1,10 +1,9 @@ >-/* $Id: regops.c $ */ >+/* $Id$ */ > /** @file > * vboxsf - VBox Linux Shared Folders VFS, regular file inode and file operations. > */ >- > /* >- * Copyright (C) 2006-2020 Oracle Corporation >+ * Copyright (C) 2006-2022 Oracle Corporation > * > * Permission is hereby granted, free of charge, to any person > * obtaining a copy of this software and associated documentation >@@ -27,8 +26,6 @@ > * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR > * OTHER DEALINGS IN THE SOFTWARE. > */ >- >- > /********************************************************************************************************************************* > * Header Files * > *********************************************************************************************************************************/ >@@ -53,30 +50,23 @@ > # include <linux/swap.h> /* for mark_page_accessed */ > #endif > #include <iprt/err.h> >- > #if RTLNX_VER_MAX(2,6,18) > # define SEEK_END 2 > #endif >- > #if RTLNX_VER_MAX(3,16,0) > # define iter_is_iovec(a_pIter) ( !((a_pIter)->type & ITER_KVEC) ) > #elif RTLNX_VER_MAX(3,19,0) > # define iter_is_iovec(a_pIter) ( !((a_pIter)->type & (ITER_KVEC | ITER_BVEC)) ) > #endif >- > #if RTLNX_VER_MAX(4,17,0) > # define vm_fault_t int > #endif >- > #if RTLNX_VER_MAX(2,5,20) > # define pgoff_t unsigned long > #endif >- > #if RTLNX_VER_MAX(2,5,12) > # define PageUptodate(a_pPage) Page_Uptodate(a_pPage) > #endif >- >- > /********************************************************************************************************************************* > * Defined Constants And Macros * > *********************************************************************************************************************************/ >@@ -87,8 +77,6 @@ > #else > # define VBSF_GET_ITER_TYPE(a_pIter) ((a_pIter)->type) > #endif >- >- > /********************************************************************************************************************************* > * Structures and Typedefs * > *********************************************************************************************************************************/ >@@ -114,7 +102,6 @@ struct vbsf_iov_iter { > # define ITER_KVEC 1 > # define iov_iter vbsf_iov_iter > #endif >- > #if RTLNX_VER_MIN(2,6,19) > /** Used by vbsf_iter_lock_pages() to keep the first page of the next segment. */ > struct vbsf_iter_stash { >@@ -133,8 +120,6 @@ struct vbsf_iter_stash { > #else > # define VBSF_ITER_STASH_INITIALIZER { NULL, 0, ~(size_t)0 } > #endif >- >- > /********************************************************************************************************************************* > * Internal Functions * > *********************************************************************************************************************************/ >@@ -143,13 +128,10 @@ static void vbsf_unlock_user_pages(struc > static void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange, > uint8_t const *pbSrcBuf, struct page **papSrcPages, > uint32_t offSrcPage, size_t cSrcPages); >- >- > /********************************************************************************************************************************* > * Provide more recent uio.h functionality to older kernels. 
* > *********************************************************************************************************************************/ > #if RTLNX_VER_RANGE(2,6,19, 3,16,0) >- > /** > * Detects the vector type. > */ >@@ -171,8 +153,6 @@ static int vbsf_iov_iter_detect_type(str > } > return 0; > } >- >- > # undef iov_iter_count > # define iov_iter_count(a_pIter) vbsf_iov_iter_count(a_pIter) > static size_t vbsf_iov_iter_count(struct vbsf_iov_iter const *iter) >@@ -186,8 +166,6 @@ static size_t vbsf_iov_iter_count(struct > } > return cbRet - iter->iov_offset; > } >- >- > # undef iov_iter_single_seg_count > # define iov_iter_single_seg_count(a_pIter) vbsf_iov_iter_single_seg_count(a_pIter) > static size_t vbsf_iov_iter_single_seg_count(struct vbsf_iov_iter const *iter) >@@ -196,8 +174,6 @@ static size_t vbsf_iov_iter_single_seg_c > return iter->iov->iov_len - iter->iov_offset; > return 0; > } >- >- > # undef iov_iter_advance > # define iov_iter_advance(a_pIter, a_cbSkip) vbsf_iov_iter_advance(a_pIter, a_cbSkip) > static void vbsf_iov_iter_advance(struct vbsf_iov_iter *iter, size_t cbSkip) >@@ -226,8 +202,6 @@ static void vbsf_iov_iter_advance(struct > } > } > } >- >- > # undef iov_iter_get_pages > # define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \ > vbsf_iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) >@@ -244,7 +218,6 @@ static ssize_t vbsf_iov_iter_get_pages(s > size_t cPages = RT_MIN(cPagesLeft, cMaxPages); > struct task_struct *pTask = current; > size_t cPagesLocked; >- > down_read(&pTask->mm->mmap_sem); > cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, iter->v_write, 1 /*force*/, papPages, NULL); > up_read(&pTask->mm->mmap_sem); >@@ -269,8 +242,6 @@ static ssize_t vbsf_iov_iter_get_pages(s > AssertFailed(); > return 0; > } >- >- > # undef iov_iter_truncate > # define iov_iter_truncate(iter, cbNew) vbsf_iov_iter_truncate(iter, cbNew) > static void vbsf_iov_iter_truncate(struct vbsf_iov_iter *iter, size_t cbNew) >@@ -278,8 +249,6 @@ static void vbsf_iov_iter_truncate(struc > /* we have no counter or stuff, so it's a no-op. */ > RT_NOREF(iter, cbNew); > } >- >- > # undef iov_iter_revert > # define iov_iter_revert(a_pIter, a_cbRewind) vbsf_iov_iter_revert(a_pIter, a_cbRewind) > void vbsf_iov_iter_revert(struct vbsf_iov_iter *iter, size_t cbRewind) >@@ -293,15 +262,12 @@ void vbsf_iov_iter_revert(struct vbsf_io > cbRewind -= iter->iov_offset; > iter->iov_offset = 0; > } >- > while (cbRewind > 0) { > struct iovec const *pIov = --iter->iov; > size_t const cbSeg = pIov->iov_len; > iter->nr_segs++; >- > Assert((uintptr_t)pIov >= (uintptr_t)iter->iov_org); > Assert(iter->nr_segs <= iter->nr_segs_org); >- > if (cbRewind <= cbSeg) { > iter->iov_offset = cbSeg - cbRewind; > break; >@@ -309,10 +275,8 @@ void vbsf_iov_iter_revert(struct vbsf_io > cbRewind -= cbSeg; > } > } >- > #endif /* 2.6.19 <= linux < 3.16.0 */ > #if RTLNX_VER_RANGE(3,16,0, 3,16,35) >- > /** This is for implementing cMaxPage on 3.16 which doesn't have it. 
*/ > static ssize_t vbsf_iov_iter_get_pages_3_16(struct iov_iter *iter, struct page **papPages, > size_t cbMax, unsigned cMaxPages, size_t *poffPg0) >@@ -329,10 +293,8 @@ static ssize_t vbsf_iov_iter_get_pages_3 > # undef iov_iter_get_pages > # define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \ > vbsf_iov_iter_get_pages_3_16(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) >- > #endif /* 3.16.0-3.16.34 */ > #if RTLNX_VER_RANGE(2,6,19, 3,18,0) >- > static size_t copy_from_iter(uint8_t *pbDst, size_t cbToCopy, struct iov_iter *pSrcIter) > { > size_t const cbTotal = cbToCopy; >@@ -370,8 +332,6 @@ static size_t copy_from_iter(uint8_t *pb > } > return cbTotal - cbToCopy; > } >- >- > static size_t copy_to_iter(uint8_t const *pbSrc, size_t cbToCopy, struct iov_iter *pDstIter) > { > size_t const cbTotal = cbToCopy; >@@ -410,15 +370,10 @@ static size_t copy_to_iter(uint8_t const > } > return cbTotal - cbToCopy; > } >- > #endif /* 3.16.0 <= linux < 3.18.0 */ >- >- >- > /********************************************************************************************************************************* > * Handle management * > *********************************************************************************************************************************/ >- > /** > * Called when an inode is released to unlink all handles that might impossibly > * still be associated with it. >@@ -431,18 +386,14 @@ void vbsf_handle_drop_chain(struct vbsf_ > unsigned long fSavedFlags; > SFLOGFLOW(("vbsf_handle_drop_chain: %p\n", pInodeInfo)); > spin_lock_irqsave(&g_SfHandleLock, fSavedFlags); >- > RTListForEachSafe(&pInodeInfo->HandleList, pCur, pNext, struct vbsf_handle, Entry) { > AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) > == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags)); > pCur->fFlags |= VBSF_HANDLE_F_ON_LIST; > RTListNodeRemove(&pCur->Entry); > } >- > spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags); > } >- >- > /** > * Locates a handle that matches all the flags in @a fFlags. > * >@@ -457,7 +408,6 @@ struct vbsf_handle *vbsf_handle_find(str > struct vbsf_handle *pCur; > unsigned long fSavedFlags; > spin_lock_irqsave(&g_SfHandleLock, fSavedFlags); >- > RTListForEach(&pInodeInfo->HandleList, pCur, struct vbsf_handle, Entry) { > AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) > == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags)); >@@ -472,13 +422,10 @@ struct vbsf_handle *vbsf_handle_find(str > ASMAtomicDecU32(&pCur->cRefs); > } > } >- > spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags); > SFLOGFLOW(("vbsf_handle_find: returns NULL!\n")); > return NULL; > } >- >- > /** > * Slow worker for vbsf_handle_release() that does the freeing. > * >@@ -492,25 +439,19 @@ uint32_t vbsf_handle_release_slow(struct > { > int rc; > unsigned long fSavedFlags; >- > SFLOGFLOW(("vbsf_handle_release_slow: %p (%s)\n", pHandle, pszCaller)); >- > /* > * Remove from the list. 
> */ > spin_lock_irqsave(&g_SfHandleLock, fSavedFlags); >- > AssertMsg((pHandle->fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC, ("%p %#x\n", pHandle, pHandle->fFlags)); > Assert(pHandle->pInodeInfo); > Assert(pHandle->pInodeInfo && pHandle->pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC); >- > if (pHandle->fFlags & VBSF_HANDLE_F_ON_LIST) { > pHandle->fFlags &= ~VBSF_HANDLE_F_ON_LIST; > RTListNodeRemove(&pHandle->Entry); > } >- > spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags); >- > /* > * Actually destroy it. > */ >@@ -522,8 +463,6 @@ uint32_t vbsf_handle_release_slow(struct > kfree(pHandle); > return 0; > } >- >- > /** > * Appends a handle to a handle list. > * >@@ -536,14 +475,11 @@ void vbsf_handle_append(struct vbsf_inod > struct vbsf_handle *pCur; > #endif > unsigned long fSavedFlags; >- > SFLOGFLOW(("vbsf_handle_append: %p (to %p)\n", pHandle, pInodeInfo)); > AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC, > ("%p %#x\n", pHandle, pHandle->fFlags)); > Assert(pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC); >- > spin_lock_irqsave(&g_SfHandleLock, fSavedFlags); >- > AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC, > ("%p %#x\n", pHandle, pHandle->fFlags)); > #ifdef VBOX_STRICT >@@ -554,19 +490,13 @@ void vbsf_handle_append(struct vbsf_inod > } > pHandle->pInodeInfo = pInodeInfo; > #endif >- > pHandle->fFlags |= VBSF_HANDLE_F_ON_LIST; > RTListAppend(&pInodeInfo->HandleList, &pHandle->Entry); >- > spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags); > } >- >- >- > /********************************************************************************************************************************* > * Misc * > *********************************************************************************************************************************/ >- > #if RTLNX_VER_MAX(2,6,6) > /** Any writable mappings? */ > DECLINLINE(bool) mapping_writably_mapped(struct address_space const *mapping) >@@ -578,8 +508,6 @@ DECLINLINE(bool) mapping_writably_mapped > # endif > } > #endif >- >- > #if RTLNX_VER_MAX(2,5,12) > /** Missing in 2.4.x, so just stub it for now. */ > DECLINLINE(bool) PageWriteback(struct page const *page) >@@ -587,8 +515,6 @@ DECLINLINE(bool) PageWriteback(struct pa > return false; > } > #endif >- >- > /** > * Helper for deciding wheter we should do a read via the page cache or not. > * >@@ -609,15 +535,10 @@ DECLINLINE(bool) vbsf_should_use_cached_ > && mapping->nrpages > 0 > && mapping_writably_mapped(mapping); > } >- >- >- > /********************************************************************************************************************************* > * Pipe / splice stuff mainly for 2.6.17 >= linux < 2.6.31 (where no fallbacks were available) * > *********************************************************************************************************************************/ >- > #if RTLNX_VER_RANGE(2,6,17, 3,16,0) >- > # if RTLNX_VER_MAX(2,6,30) > # define LOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_lock(&(a_pPipe)->inode->i_mutex); } while (0) > # define UNLOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_unlock(&(a_pPipe)->inode->i_mutex); } while (0) >@@ -625,8 +546,6 @@ DECLINLINE(bool) vbsf_should_use_cached_ > # define LOCK_PIPE(a_pPipe) pipe_lock(a_pPipe) > # define UNLOCK_PIPE(a_pPipe) pipe_unlock(a_pPipe) > # endif >- >- > /** Waits for the pipe buffer status to change. 
*/ > static void vbsf_wait_pipe(struct pipe_inode_info *pPipe) > { >@@ -637,14 +556,10 @@ static void vbsf_wait_pipe(struct pipe_i > prepare_to_wait(&pPipe->wait, &WaitStuff, TASK_INTERRUPTIBLE); > # endif > UNLOCK_PIPE(pPipe); >- > schedule(); >- > finish_wait(&pPipe->wait, &WaitStuff); > LOCK_PIPE(pPipe); > } >- >- > /** Worker for vbsf_feed_pages_to_pipe that wakes up readers. */ > static void vbsf_wake_up_pipe(struct pipe_inode_info *pPipe, bool fReaders) > { >@@ -656,18 +571,14 @@ static void vbsf_wake_up_pipe(struct pip > else > kill_fasync(&pPipe->fasync_writers, SIGIO, POLL_OUT); > } >- > #endif > #if RTLNX_VER_RANGE(2,6,17, 2,6,31) >- > /** Verify pipe buffer content (needed for page-cache to ensure idle page). */ > static int vbsf_pipe_buf_confirm(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf) > { > /*SFLOG3(("vbsf_pipe_buf_confirm: %p\n", pPipeBuf));*/ > return 0; > } >- >- > /** Maps the buffer page. */ > static void *vbsf_pipe_buf_map(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, int atomic) > { >@@ -681,8 +592,6 @@ static void *vbsf_pipe_buf_map(struct pi > /*SFLOG3(("vbsf_pipe_buf_map: %p -> %p\n", pPipeBuf, pvRet));*/ > return pvRet; > } >- >- > /** Unmaps the buffer page. */ > static void vbsf_pipe_buf_unmap(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, void *pvMapping) > { >@@ -694,24 +603,18 @@ static void vbsf_pipe_buf_unmap(struct p > kunmap_atomic(pvMapping, KM_USER0); > } > } >- >- > /** Gets a reference to the page. */ > static void vbsf_pipe_buf_get(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf) > { > page_cache_get(pPipeBuf->page); > /*SFLOG3(("vbsf_pipe_buf_get: %p (return count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/ > } >- >- > /** Release the buffer page (counter to vbsf_pipe_buf_get). */ > static void vbsf_pipe_buf_release(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf) > { > /*SFLOG3(("vbsf_pipe_buf_release: %p (incoming count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/ > page_cache_release(pPipeBuf->page); > } >- >- > /** Attempt to steal the page. > * @returns 0 success, 1 on failure. */ > static int vbsf_pipe_buf_steal(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf) >@@ -724,8 +627,6 @@ static int vbsf_pipe_buf_steal(struct pi > SFLOG3(("vbsf_pipe_buf_steal: %p -> 1\n", pPipeBuf)); > return 1; > } >- >- > /** > * Pipe buffer operations for used by vbsf_feed_pages_to_pipe. > */ >@@ -742,8 +643,6 @@ static struct pipe_buf_operations vbsf_p > .release = vbsf_pipe_buf_release, > .steal = vbsf_pipe_buf_steal, > }; >- >- > /** > * Feeds the pages to the pipe. > * >@@ -755,7 +654,6 @@ static ssize_t vbsf_feed_pages_to_pipe(s > ssize_t cbRet = 0; > size_t iPage = 0; > bool fNeedWakeUp = false; >- > LOCK_PIPE(pPipe); > for (;;) { > if ( pPipe->readers > 0 >@@ -770,13 +668,11 @@ static ssize_t vbsf_feed_pages_to_pipe(s > pPipeBuf->ops = &vbsf_pipe_buf_ops; > pPipeBuf->flags = fFlags & SPLICE_F_GIFT ? PIPE_BUF_FLAG_GIFT : 0; > pPipeBuf->page = papPages[iPage]; >- > papPages[iPage++] = NULL; > pPipe->nrbufs++; > fNeedWakeUp |= pPipe->inode != NULL; > offPg0 = 0; > cbRet += cbThisPage; >- > /* done? */ > cbActual -= cbThisPage; > if (!cbActual) >@@ -807,14 +703,10 @@ static ssize_t vbsf_feed_pages_to_pipe(s > } > } > UNLOCK_PIPE(pPipe); >- > if (fNeedWakeUp) > vbsf_wake_up_pipe(pPipe, true /*fReaders*/); >- > return cbRet; > } >- >- > /** > * For splicing from a file to a pipe. 
> */ >@@ -823,7 +715,6 @@ static ssize_t vbsf_splice_read(struct f > struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode; > struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb); > ssize_t cbRet; >- > SFLOGFLOW(("vbsf_splice_read: file=%p poffset=%p{%#RX64} pipe=%p len=%#zx flags=%#x\n", file, poffset, *poffset, pipe, len, flags)); > if (vbsf_should_use_cached_read(file, inode->i_mapping, pSuperInfo)) { > cbRet = generic_file_splice_read(file, poffset, pipe, len, flags); >@@ -873,10 +764,8 @@ static ssize_t vbsf_splice_read(struct f > uint32_t cbActual = pReq->Parms.cb32Read.u.value32; > AssertStmt(cbActual <= cbToRead, cbActual = cbToRead); > SFLOG2(("vbsf_splice_read: read -> %#x bytes @ %#RX64\n", cbActual, offFile)); >- > VbglR0PhysHeapFree(pReq); > pReq = NULL; >- > /* > * Now, feed it to the pipe thingy. > * This will take ownership of the all pages no matter what happens. >@@ -890,7 +779,6 @@ static ssize_t vbsf_splice_read(struct f > } > i = cPages; > } >- > while (i-- > 0) > if (apPages[i]) > __free_pages(apPages[i], 0); >@@ -903,10 +791,8 @@ static ssize_t vbsf_splice_read(struct f > SFLOGFLOW(("vbsf_splice_read: returns %zd (%#zx), *poffset=%#RX64\n", cbRet, cbRet, *poffset)); > return cbRet; > } >- > #endif /* 2.6.17 <= LINUX_VERSION_CODE < 2.6.31 */ > #if RTLNX_VER_RANGE(2,6,17, 3,16,0) >- > /** > * For splicing from a pipe to a file. > * >@@ -918,7 +804,6 @@ static ssize_t vbsf_splice_write(struct > struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode; > struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb); > ssize_t cbRet; >- > SFLOGFLOW(("vbsf_splice_write: pPipe=%p file=%p poffset=%p{%#RX64} len=%#zx flags=%#x\n", pPipe, file, poffset, *poffset, len, flags)); > /** @todo later if (false) { > cbRet = generic_file_splice_write(pPipe, file, poffset, len, flags); >@@ -943,9 +828,7 @@ static ssize_t vbsf_splice_write(struct > loff_t offFile = *poffset; > bool fNeedWakeUp = false; > cbRet = 0; >- > LOCK_PIPE(pPipe); >- > for (;;) { > unsigned cBufs = pPipe->nrbufs; > /*SFLOG2(("vbsf_splice_write: nrbufs=%#x curbuf=%#x\n", cBufs, pPipe->curbuf));*/ >@@ -957,13 +840,10 @@ static ssize_t vbsf_splice_write(struct > struct pipe_buffer *pPipeBuf = &pPipe->bufs[pPipe->curbuf]; > uint32_t cPagesToWrite = 1; > uint32_t cbToWrite = pPipeBuf->len; >- > Assert(pPipeBuf->offset < PAGE_SIZE); > Assert(pPipeBuf->offset + pPipeBuf->len <= PAGE_SIZE); >- > pReq->PgLst.offFirstPage = pPipeBuf->offset & PAGE_OFFSET; > pReq->PgLst.aPages[0] = page_to_phys(pPipeBuf->page); >- > /* Add any adjacent page buffers: */ > while ( cPagesToWrite < cBufs > && cPagesToWrite < cMaxPages >@@ -981,7 +861,6 @@ static ssize_t vbsf_splice_write(struct > cbToWrite += pPipeBuf2->len; > cPagesToWrite += 1; > } >- > /* Check that we don't have signals pending before we issue the write, as > we'll only end up having to cancel the HGCM request 99% of the time: */ > if (!signal_pending(current)) { >@@ -999,25 +878,19 @@ static ssize_t vbsf_splice_write(struct > uint32_t cbActual = pReq->Parms.cb32Write.u.value32; > AssertStmt(cbActual <= cbToWrite, cbActual = cbToWrite); > SFLOG2(("vbsf_splice_write: write -> %#x bytes @ %#RX64\n", cbActual, offFile)); >- > cbRet += cbActual; >- > while (cbActual > 0) { > uint32_t cbAdvance = RT_MIN(pPipeBuf->len, cbActual); >- > vbsf_reg_write_sync_page_cache(mapping, offFile, cbAdvance, NULL, > &pPipeBuf->page, pPipeBuf->offset, 1); >- > offFile += cbAdvance; > cbActual -= cbAdvance; > pPipeBuf->offset += cbAdvance; > pPipeBuf->len -= cbAdvance; >- 
> if (!pPipeBuf->len) { > struct pipe_buf_operations const *pOps = pPipeBuf->ops; > pPipeBuf->ops = NULL; > pOps->release(pPipe, pPipeBuf); >- > # ifdef PIPE_BUFFERS > pPipe->curbuf = (pPipe->curbuf + 1) % PIPE_BUFFERS; > # else >@@ -1025,7 +898,6 @@ static ssize_t vbsf_splice_write(struct > # endif > pPipe->nrbufs -= 1; > pPipeBuf = &pPipe->bufs[pPipe->curbuf]; >- > # if RTLNX_VER_MAX(2,6,30) > fNeedWakeUp |= pPipe->inode != NULL; > # else >@@ -1036,7 +908,6 @@ static ssize_t vbsf_splice_write(struct > break; > } > } >- > *poffset = offFile; > } else { > if (cbRet == 0) >@@ -1054,20 +925,17 @@ static ssize_t vbsf_splice_write(struct > SFLOGFLOW(("vbsf_splice_write: No buffers. No writers. The show is done!\n")); > break; > } >- > /* Quit if if we've written some and no writers waiting on the lock: */ > if (cbRet > 0 && pPipe->waiting_writers == 0) { > SFLOGFLOW(("vbsf_splice_write: No waiting writers, returning what we've got.\n")); > break; > } >- > /* Quit with EAGAIN if non-blocking: */ > if (flags & SPLICE_F_NONBLOCK) { > if (cbRet == 0) > cbRet = -EAGAIN; > break; > } >- > /* Quit if we've got pending signals: */ > if (signal_pending(current)) { > if (cbRet == 0) >@@ -1075,7 +943,6 @@ static ssize_t vbsf_splice_write(struct > SFLOGFLOW(("vbsf_splice_write: pending signal! (%zd)\n", cbRet)); > break; > } >- > /* Wake up writers before we start waiting: */ > if (fNeedWakeUp) { > vbsf_wake_up_pipe(pPipe, false /*fReaders*/); >@@ -1084,12 +951,9 @@ static ssize_t vbsf_splice_write(struct > vbsf_wait_pipe(pPipe); > } > } /* feed loop */ >- > if (fNeedWakeUp) > vbsf_wake_up_pipe(pPipe, false /*fReaders*/); >- > UNLOCK_PIPE(pPipe); >- > VbglR0PhysHeapFree(pReq); > } else { > cbRet = -ENOMEM; >@@ -1098,9 +962,7 @@ static ssize_t vbsf_splice_write(struct > SFLOGFLOW(("vbsf_splice_write: returns %zd (%#zx), *poffset=%#RX64\n", cbRet, cbRet, *poffset)); > return cbRet; > } >- > #endif /* 2.6.17 <= LINUX_VERSION_CODE < 3.16.0 */ >- > #if RTLNX_VER_RANGE(2,5,30, 2,6,23) > /** > * Our own senfile implementation that does not go via the page cache like >@@ -1120,13 +982,11 @@ static ssize_t vbsf_reg_sendfile(struct > SFLOGFLOW(("vbsf_reg_sendfile: pFile=%p poffFile=%p{%#RX64} cbToSend=%#zx pfnActor=%p pvUser=%p\n", > pFile, poffFile, poffFile ? *poffFile : 0, cbToSend, pfnActor, pvUser)); > Assert(pSuperInfo); >- > /* > * Return immediately if asked to send nothing. > */ > if (cbToSend == 0) > return 0; >- > /* > * Like for vbsf_reg_read() and vbsf_reg_read_iter(), we allow going via > * the page cache in some cases or configs. >@@ -1176,10 +1036,8 @@ static ssize_t vbsf_reg_sendfile(struct > # endif > RdDesc.written = 0; > RdDesc.error = 0; >- > Assert(sf_r); > Assert((sf_r->Handle.fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC); >- > while (cbToSend > 0) { > /* > * Read another chunk. For paranoid reasons, we keep data where the page cache >@@ -1204,15 +1062,12 @@ static ssize_t vbsf_reg_sendfile(struct > bool const fIsEof = cbActual < cbToRead; > AssertStmt(cbActual <= cbToRead, cbActual = cbToRead); > SFLOG3(("vbsf_reg_sendfile: Read %#x bytes (offPg0=%#x), wanted %#x ...\n", cbActual, offPg0, cbToRead)); >- > iPage = 0; > while (cbActual > 0) { > uint32_t const cbPage = RT_MIN(cbActual, PAGE_SIZE - off); > int const cbRetActor = pfnActor(&RdDesc, apPages[iPage], off, cbPage); > Assert(cbRetActor >= 0); /* Returns zero on failure, with RdDesc.error holding the status code. 
*/ >- > AssertMsg(iPage < cPages && iPage < cPagesToRead, ("iPage=%#x cPages=%#x cPagesToRead=%#x\n", iPage, cPages, cPagesToRead)); >- > offFile += cbRetActor; > if ((uint32_t)cbRetActor == cbPage && RdDesc.count > 0) { > cbActual -= cbPage; >@@ -1226,14 +1081,12 @@ static ssize_t vbsf_reg_sendfile(struct > } > off = 0; > } >- > /* > * Are we done yet? > */ > if (RT_FAILURE_NP(vrc) || cbToSend == 0 || RdDesc.error != 0 || fIsEof) { > break; > } >- > /* > * Replace pages held by the actor. > */ >@@ -1262,13 +1115,11 @@ static ssize_t vbsf_reg_sendfile(struct > break; > } > } >- > /* > * Free memory. > */ > for (iPage = 0; iPage < cPages; iPage++) > vbsf_put_page(apPages[iPage]); >- > /* > * Set the return values. > */ >@@ -1289,12 +1140,9 @@ static ssize_t vbsf_reg_sendfile(struct > return cbRet; > } > #endif /* 2.5.30 <= LINUX_VERSION_CODE < 2.6.23 */ >- >- > /********************************************************************************************************************************* > * File operations on regular files * > *********************************************************************************************************************************/ >- > /** Wrapper around put_page / page_cache_release. */ > DECLINLINE(void) vbsf_put_page(struct page *pPage) > { >@@ -1304,8 +1152,6 @@ DECLINLINE(void) vbsf_put_page(struct pa > page_cache_release(pPage); > #endif > } >- >- > /** Wrapper around get_page / page_cache_get. */ > DECLINLINE(void) vbsf_get_page(struct page *pPage) > { >@@ -1315,15 +1161,12 @@ DECLINLINE(void) vbsf_get_page(struct pa > page_cache_get(pPage); > #endif > } >- >- > /** Companion to vbsf_lock_user_pages(). */ > static void vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack) > { > /* We don't mark kernel pages dirty: */ > if (fLockPgHack) > fSetDirty = false; >- > while (cPages-- > 0) > { > struct page *pPage = papPages[cPages]; >@@ -1333,8 +1176,6 @@ static void vbsf_unlock_user_pages(struc > vbsf_put_page(pPage); > } > } >- >- > /** > * Worker for vbsf_lock_user_pages_failed_check_kernel() and > * vbsf_iter_lock_pages(). >@@ -1345,7 +1186,6 @@ static int vbsf_lock_kernel_pages(uint8_ > uintptr_t const uPtrLast = (uPtrFrom & ~(uintptr_t)PAGE_OFFSET_MASK) + (cPages << PAGE_SHIFT) - 1; > uint8_t *pbPage = (uint8_t *)uPtrLast; > size_t iPage = cPages; >- > /* > * Touch the pages first (paranoia^2). > */ >@@ -1362,7 +1202,6 @@ static int vbsf_lock_kernel_pages(uint8_ > pbProbe += PAGE_SIZE; > } > } >- > /* > * Get the pages. > * Note! Fixes here probably applies to rtR0MemObjNativeLockKernel as well. >@@ -1395,8 +1234,6 @@ static int vbsf_lock_kernel_pages(uint8_ > } > return 0; > } >- >- > /** > * Catches kernel_read() and kernel_write() calls and works around them. > * >@@ -1434,11 +1271,8 @@ static int vbsf_lock_user_pages_failed_c > return 0; > } > } >- > return rcFailed; > } >- >- > /** Wrapper around get_user_pages. */ > DECLINLINE(int) vbsf_lock_user_pages(uintptr_t uPtrFrom, size_t cPages, bool fWrite, struct page **papPages, bool *pfLockPgHack) > { >@@ -1465,21 +1299,16 @@ DECLINLINE(int) vbsf_lock_user_pages(uin > *pfLockPgHack = false; > if (cPagesLocked == cPages) > return 0; >- > /* > * It failed. 
> */ > if (cPagesLocked < 0) > return vbsf_lock_user_pages_failed_check_kernel(uPtrFrom, cPages, fWrite, (int)cPagesLocked, papPages, pfLockPgHack); >- > vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/); >- > /* We could use uPtrFrom + cPagesLocked to get the correct status here... */ > return -EFAULT; > } >- > #if RTLNX_VER_MAX(5,10,0) /* No regular .read/.write for 5.10, only .read_iter/.write_iter or in-kernel reads/writes fail. */ >- > /** > * Read function used when accessing files that are memory mapped. > * >@@ -1493,37 +1322,27 @@ static ssize_t vbsf_reg_read_mapped(stru > struct iov_iter iter; > struct kiocb kiocb; > ssize_t cbRet; >- > init_sync_kiocb(&kiocb, file); > kiocb.ki_pos = *off; > iov_iter_init(&iter, READ, &iov, 1, size); >- > cbRet = generic_file_read_iter(&kiocb, &iter); >- > *off = kiocb.ki_pos; > return cbRet; >- > # elif RTLNX_VER_MIN(2,6,19) > struct iovec iov = { .iov_base = buf, .iov_len = size }; > struct kiocb kiocb; > ssize_t cbRet; >- > init_sync_kiocb(&kiocb, file); > kiocb.ki_pos = *off; >- > cbRet = generic_file_aio_read(&kiocb, &iov, 1, *off); > if (cbRet == -EIOCBQUEUED) > cbRet = wait_on_sync_kiocb(&kiocb); >- > *off = kiocb.ki_pos; > return cbRet; >- > # else /* 2.6.18 or earlier: */ > return generic_file_read(file, buf, size, off); > # endif > } >- >- > /** > * Fallback case of vbsf_reg_read() that locks the user buffers and let the host > * write directly to them. >@@ -1546,7 +1365,6 @@ static ssize_t vbsf_reg_read_locking(str > size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT; > size_t cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 1), cPages); > bool fLockPgHack; >- > pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages])); > while (!pReq && cMaxPages > 4) { > cMaxPages /= 2; >@@ -1570,7 +1388,6 @@ static ssize_t vbsf_reg_read_locking(str > cPages = cMaxPages; > cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk; > } >- > rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, true /*fWrite*/, papPages, &fLockPgHack); > if (rc == 0) { > size_t iPage = cPages; >@@ -1580,15 +1397,12 @@ static ssize_t vbsf_reg_read_locking(str > cbRet = rc; > break; > } >- > /* > * Issue the request and unlock the pages. > */ > rc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages); >- > Assert(cPages <= cMaxPages); > vbsf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/, fLockPgHack); >- > if (RT_SUCCESS(rc)) { > /* > * Success, advance position and buffer. >@@ -1599,7 +1413,6 @@ static ssize_t vbsf_reg_read_locking(str > offFile += cbActual; > buf = (uint8_t *)buf + cbActual; > size -= cbActual; >- > /* > * Are we done already? If so commit the new file offset. > */ >@@ -1637,8 +1450,6 @@ static ssize_t vbsf_reg_read_locking(str > SFLOGFLOW(("vbsf_reg_read: returns %zd (%#zx), *off=%RX64 [lock]\n", cbRet, cbRet, *off)); > return cbRet; > } >- >- > /** > * Read from a regular file. 
> * >@@ -1654,19 +1465,14 @@ static ssize_t vbsf_reg_read(struct file > struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb); > struct vbsf_reg_info *sf_r = file->private_data; > struct address_space *mapping = inode->i_mapping; >- > SFLOGFLOW(("vbsf_reg_read: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off)); >- > if (!S_ISREG(inode->i_mode)) { > LogFunc(("read from non regular file %d\n", inode->i_mode)); > return -EINVAL; > } >- > /** @todo XXX Check read permission according to inode->i_mode! */ >- > if (!size) > return 0; >- > /* > * If there is a mapping and O_DIRECT isn't in effect, we must at a > * heed dirty pages in the mapping and read from them. For simplicity >@@ -1675,7 +1481,6 @@ static ssize_t vbsf_reg_read(struct file > */ > if (vbsf_should_use_cached_read(file, mapping, pSuperInfo)) > return vbsf_reg_read_mapped(file, buf, size, off); >- > /* > * For small requests, try use an embedded buffer provided we get a heap block > * that does not cross page boundraries (see host code). >@@ -1703,7 +1508,6 @@ static ssize_t vbsf_reg_read(struct file > VbglR0PhysHeapFree(pReq); > } > } >- > # if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */ > /* > * For medium sized requests try use a bounce buffer. >@@ -1734,12 +1538,9 @@ static ssize_t vbsf_reg_read(struct file > } > } > # endif >- > return vbsf_reg_read_locking(file, buf, size, off, pSuperInfo, sf_r); > } >- > #endif /* < 5.10.0 */ >- > /** > * Helper the synchronizes the page cache content with something we just wrote > * to the host. >@@ -1800,7 +1601,6 @@ static void vbsf_reg_write_sync_page_cac > unlock_page(pDstPage); > vbsf_put_page(pDstPage); > } >- > /* > * Advance. > */ >@@ -1825,9 +1625,7 @@ static void vbsf_reg_write_sync_page_cac > } > RT_NOREF(cSrcPages); > } >- > #if RTLNX_VER_MAX(5,10,0) /* No regular .read/.write for 5.10, only .read_iter/.write_iter or in-kernel reads/writes fail. */ >- > /** > * Fallback case of vbsf_reg_write() that locks the user buffers and let the host > * write directly to them. >@@ -1850,7 +1648,6 @@ static ssize_t vbsf_reg_write_locking(st > size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT; > size_t cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 1), cPages); > bool fLockPgHack; >- > pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages])); > while (!pReq && cMaxPages > 4) { > cMaxPages /= 2; >@@ -1874,7 +1671,6 @@ static ssize_t vbsf_reg_write_locking(st > cPages = cMaxPages; > cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk; > } >- > rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, false /*fWrite*/, papPages, &fLockPgHack); > if (rc == 0) { > size_t iPage = cPages; >@@ -1884,7 +1680,6 @@ static ssize_t vbsf_reg_write_locking(st > cbRet = rc; > break; > } >- > /* > * Issue the request and unlock the pages. 
> */ >@@ -1896,24 +1691,19 @@ static ssize_t vbsf_reg_write_locking(st > */ > uint32_t cbActual = pReq->Parms.cb32Write.u.value32; > AssertStmt(cbActual <= cbChunk, cbActual = cbChunk); >- > vbsf_reg_write_sync_page_cache(inode->i_mapping, offFile, cbActual, NULL /*pbKrnlBuf*/, > papPages, (uintptr_t)buf & PAGE_OFFSET_MASK, cPages); > Assert(cPages <= cMaxPages); > vbsf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/, fLockPgHack); >- > cbRet += cbActual; > buf = (uint8_t *)buf + cbActual; > size -= cbActual; >- > offFile += cbActual; > if ((file->f_flags & O_APPEND) && (g_fSfFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET)) > offFile = pReq->Parms.off64Write.u.value64; > if (offFile > i_size_read(inode)) > i_size_write(inode, offFile); >- > sf_i->force_restat = 1; /* mtime (and size) may have changed */ >- > /* > * Are we done already? If so commit the new file offset. > */ >@@ -1954,8 +1744,6 @@ static ssize_t vbsf_reg_write_locking(st > SFLOGFLOW(("vbsf_reg_write: returns %zd (%#zx), *off=%RX64 [lock]\n", cbRet, cbRet, *off)); > return cbRet; > } >- >- > /** > * Write to a regular file. > * >@@ -1973,27 +1761,21 @@ static ssize_t vbsf_reg_write(struct fil > struct vbsf_reg_info *sf_r = file->private_data; > struct address_space *mapping = inode->i_mapping; > loff_t pos; >- > SFLOGFLOW(("vbsf_reg_write: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off)); > Assert(sf_i); > Assert(pSuperInfo); > Assert(sf_r); > AssertReturn(S_ISREG(inode->i_mode), -EINVAL); >- > pos = *off; > if (file->f_flags & O_APPEND) > pos = i_size_read(inode); >- > /** @todo XXX Check write permission according to inode->i_mode! */ >- > if (!size) { > if (file->f_flags & O_APPEND) /** @todo check if this is the consensus behavior... */ > *off = pos; > return 0; > } >- > /** @todo Implement the read-write caching mode. */ >- > /* > * If there are active writable mappings, coordinate with any > * pending writes via those. >@@ -2009,7 +1791,6 @@ static ssize_t vbsf_reg_write(struct fil > /** @todo ... */ > # endif > } >- > /* > * For small requests, try use an embedded buffer provided we get a heap block > * that does not cross page boundraries (see host code). >@@ -2040,7 +1821,6 @@ static ssize_t vbsf_reg_write(struct fil > sf_i->force_restat = 1; /* mtime (and size) may have changed */ > } else > cbRet = -EFAULT; >- > VbglR0PhysHeapFree(pReq); > SFLOGFLOW(("vbsf_reg_write: returns %zd (%#zx), *off=%RX64 [embed]\n", cbRet, cbRet, *off)); > return cbRet; >@@ -2048,7 +1828,6 @@ static ssize_t vbsf_reg_write(struct fil > if (pReq) > VbglR0PhysHeapFree(pReq); > } >- > # if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */ > /* > * For medium sized requests try use a bounce buffer. >@@ -2089,13 +1868,10 @@ static ssize_t vbsf_reg_write(struct fil > } > } > # endif >- > return vbsf_reg_write_locking(file, buf, size, off, pos, inode, sf_i, pSuperInfo, sf_r); > } >- > #endif /* < 5.10.0 */ > #if RTLNX_VER_MIN(2,6,19) >- > /** > * Companion to vbsf_iter_lock_pages(). > */ >@@ -2104,7 +1880,6 @@ DECLINLINE(void) vbsf_iter_unlock_pages( > /* We don't mark kernel pages dirty (KVECs, BVECs, PIPEs): */ > if (!iter_is_iovec(iter)) > fSetDirty = false; >- > while (cPages-- > 0) > { > struct page *pPage = papPages[cPages]; >@@ -2113,8 +1888,6 @@ DECLINLINE(void) vbsf_iter_unlock_pages( > vbsf_put_page(pPage); > } > } >- >- > /** > * Locks up to @a cMaxPages from the I/O vector iterator, advancing the > * iterator. 
>@@ -2136,7 +1909,6 @@ static int vbsf_iter_lock_pages(struct i > size_t cPages = 0; > size_t offPage0 = 0; > int rc = 0; >- > Assert(iov_iter_count(iter) + pStash->cb > 0); > if (!(VBSF_GET_ITER_TYPE(iter) & ITER_KVEC)) { > /* >@@ -2170,7 +1942,6 @@ static int vbsf_iter_lock_pages(struct i > pStash->Copy = *iter; > # endif > } >- > /* > * Get pages segment by segment. > */ >@@ -2223,7 +1994,6 @@ static int vbsf_iter_lock_pages(struct i > if ( cMaxPages == 0 > || cbSegRet != PAGE_SIZE) > break; >- > /* > * Get the rest of the segment (if anything remaining). > */ >@@ -2265,7 +2035,6 @@ static int vbsf_iter_lock_pages(struct i > } > Assert(cMaxPages > 0); > } while (iov_iter_count(iter) > 0); >- > } else { > /* > * The silly iov_iter_get_pages_alloc() function doesn't handle KVECs, >@@ -2282,13 +2051,11 @@ static int vbsf_iter_lock_pages(struct i > uint8_t *pbBuf; > size_t offStart; > size_t cPgSeg; >- > size_t cbSeg = iov_iter_single_seg_count(iter); > while (!cbSeg) { > iov_iter_advance(iter, 0); > cbSeg = iov_iter_single_seg_count(iter); > } >- > # if RTLNX_VER_MIN(3,19,0) > pbBuf = iter->kvec->iov_base + iter->iov_offset; > # else >@@ -2299,13 +2066,11 @@ static int vbsf_iter_lock_pages(struct i > offPage0 = offStart; > else if (offStart) > break; >- > cPgSeg = RT_ALIGN_Z(cbSeg, PAGE_SIZE) >> PAGE_SHIFT; > if (cPgSeg > cMaxPages) { > cPgSeg = cMaxPages; > cbSeg = (cPgSeg << PAGE_SHIFT) - offStart; > } >- > rc = vbsf_lock_kernel_pages(pbBuf, fWrite, cPgSeg, &papPages[cPages]); > if (rc == 0) { > iov_iter_advance(iter, cbSeg); >@@ -2319,7 +2084,6 @@ static int vbsf_iter_lock_pages(struct i > break; > } while (iov_iter_count(iter) > 0); > } >- > /* > * Clean up if we failed; set return values. > */ >@@ -2336,8 +2100,6 @@ static int vbsf_iter_lock_pages(struct i > SFLOGFLOW(("vbsf_iter_lock_pages: returns %d - cPages=%#zx offPage0=%#zx cbChunk=%zx\n", rc, cPages, offPage0, cbChunk)); > return rc; > } >- >- > /** > * Rewinds the I/O vector. > */ >@@ -2353,7 +2115,6 @@ static bool vbsf_iter_rewind(struct iov_ > pStash->cb = 0; > pStash->off = 0; > } >- > # if RTLNX_VER_MIN(4,11,0) || RTLNX_VER_MAX(3,16,0) > iov_iter_revert(iter, cbToRewind + cbExtra); > return true; >@@ -2362,8 +2123,6 @@ static bool vbsf_iter_rewind(struct iov_ > return false; > # endif > } >- >- > /** > * Cleans up the page locking stash. > */ >@@ -2372,8 +2131,6 @@ DECLINLINE(void) vbsf_iter_cleanup_stash > if (pStash->pPage) > vbsf_iter_rewind(iter, pStash, 0, 0); > } >- >- > /** > * Calculates the longest span of pages we could transfer to the host in a > * single request. >@@ -2390,15 +2147,12 @@ static size_t vbsf_iter_max_span_of_page > const struct iovec *pCurIov = iter->iov; > size_t cLeft = iter->nr_segs; > size_t cPagesSpan = 0; >- > /* iovect and kvec are identical, except for the __user tagging of iov_base. */ > AssertCompileMembersSameSizeAndOffset(struct iovec, iov_base, struct kvec, iov_base); > AssertCompileMembersSameSizeAndOffset(struct iovec, iov_len, struct kvec, iov_len); > AssertCompile(sizeof(struct iovec) == sizeof(struct kvec)); >- > cPages = 1; > AssertReturn(cLeft > 0, cPages); >- > /* Special case: segment offset. */ > if (iter->iov_offset > 0) { > if (iter->iov_offset < pCurIov->iov_len) { >@@ -2412,7 +2166,6 @@ static size_t vbsf_iter_max_span_of_page > pCurIov++; > cLeft--; > } >- > /* Full segments. 
*/ > while (cLeft-- > 0) { > if (pCurIov->iov_len > 0) { >@@ -2455,8 +2208,6 @@ static size_t vbsf_iter_max_span_of_page > SFLOGFLOW(("vbsf_iter_max_span_of_pages: returns %#zx\n", cPages)); > return cPages; > } >- >- > /** > * Worker for vbsf_reg_read_iter() that deals with larger reads using page > * locking. >@@ -2475,7 +2226,6 @@ static ssize_t vbsf_reg_read_iter_lockin > ssize_t cbRet = 0; > size_t cMaxPages = vbsf_iter_max_span_of_pages(iter); > cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 2), cMaxPages); >- > pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages])); > while (!pReq && cMaxPages > 4) { > cMaxPages /= 2; >@@ -2484,7 +2234,6 @@ static ssize_t vbsf_reg_read_iter_lockin > if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack)) > papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL); > if (pReq && papPages) { >- > /* > * The read loop. > */ >@@ -2509,16 +2258,13 @@ static ssize_t vbsf_reg_read_iter_lockin > cbRet = rc; > break; > } >- > /* > * Issue the request and unlock the pages. > */ > rc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, kio->ki_pos, cbChunk, cPages); > SFLOGFLOW(("vbsf_reg_read_iter_locking: VbglR0SfHostReqReadPgLst -> %d (cbActual=%#x cbChunk=%#zx of %#zx cPages=%#zx offPage0=%#x\n", > rc, pReq->Parms.cb32Read.u.value32, cbChunk, cbToRead, cPages, offPage0)); >- > vbsf_iter_unlock_pages(iter, papPages, cPages, true /*fSetDirty*/); >- > if (RT_SUCCESS(rc)) { > /* > * Success, advance position and buffer. >@@ -2528,7 +2274,6 @@ static ssize_t vbsf_reg_read_iter_lockin > cbRet += cbActual; > kio->ki_pos += cbActual; > cbToRead -= cbActual; >- > /* > * Are we done already? > */ >@@ -2562,7 +2307,6 @@ static ssize_t vbsf_reg_read_iter_lockin > } > } > } while (cbToRead > 0); >- > vbsf_iter_cleanup_stash(iter, &Stash); > } > else >@@ -2574,8 +2318,6 @@ static ssize_t vbsf_reg_read_iter_lockin > SFLOGFLOW(("vbsf_reg_read_iter_locking: returns %#zx (%zd)\n", cbRet, cbRet)); > return cbRet; > } >- >- > /** > * Read into I/O vector iterator. > * >@@ -2596,20 +2338,16 @@ static ssize_t vbsf_reg_aio_read(struct > size_t cbToRead = iov_iter_count(iter); > struct inode *inode = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode; > struct address_space *mapping = inode->i_mapping; >- > struct vbsf_reg_info *sf_r = kio->ki_filp->private_data; > struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb); >- > SFLOGFLOW(("vbsf_reg_read_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n", > inode, kio->ki_filp, cbToRead, kio->ki_pos, VBSF_GET_ITER_TYPE(iter) )); > AssertReturn(S_ISREG(inode->i_mode), -EINVAL); >- > /* > * Do we have anything at all to do here? > */ > if (!cbToRead) > return 0; >- > /* > * If there is a mapping and O_DIRECT isn't in effect, we must at a > * heed dirty pages in the mapping and read from them. For simplicity >@@ -2623,7 +2361,6 @@ static ssize_t vbsf_reg_aio_read(struct > return generic_file_aio_read(kio, iov, cSegs, offFile); > # endif > } >- > /* > * Now now we reject async I/O requests. > */ >@@ -2631,7 +2368,6 @@ static ssize_t vbsf_reg_aio_read(struct > SFLOGFLOW(("vbsf_reg_read_iter: async I/O not yet supported\n")); /** @todo extend FsPerf with AIO tests. */ > return -EOPNOTSUPP; > } >- > /* > * For small requests, try use an embedded buffer provided we get a heap block > * that does not cross page boundraries (see host code). 
>@@ -2662,14 +2398,11 @@ static ssize_t vbsf_reg_aio_read(struct > VbglR0PhysHeapFree(pReq); > } > } >- > /* > * Otherwise do the page locking thing. > */ > return vbsf_reg_read_iter_locking(kio, iter, cbToRead, pSuperInfo, sf_r); > } >- >- > /** > * Worker for vbsf_reg_write_iter() that deals with larger writes using page > * locking. >@@ -2689,7 +2422,6 @@ static ssize_t vbsf_reg_write_iter_locki > ssize_t cbRet = 0; > size_t cMaxPages = vbsf_iter_max_span_of_pages(iter); > cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 2), cMaxPages); >- > pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages])); > while (!pReq && cMaxPages > 4) { > cMaxPages /= 2; >@@ -2698,7 +2430,6 @@ static ssize_t vbsf_reg_write_iter_locki > if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack)) > papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL); > if (pReq && papPages) { >- > /* > * The write loop. > */ >@@ -2723,7 +2454,6 @@ static ssize_t vbsf_reg_write_iter_locki > cbRet = rc; > break; > } >- > /* > * Issue the request and unlock the pages. > */ >@@ -2737,22 +2467,17 @@ static ssize_t vbsf_reg_write_iter_locki > */ > uint32_t cbActual = pReq->Parms.cb32Write.u.value32; > AssertStmt(cbActual <= cbChunk, cbActual = cbChunk); >- > vbsf_reg_write_sync_page_cache(mapping, offFile, cbActual, NULL /*pbSrcBuf*/, papPages, offPage0, cPages); > vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/); >- > cbRet += cbActual; > cbToWrite -= cbActual; >- > offFile += cbActual; > if (fAppend && (g_fSfFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET)) > offFile = pReq->Parms.off64Write.u.value64; > kio->ki_pos = offFile; > if (offFile > i_size_read(inode)) > i_size_write(inode, offFile); >- > sf_i->force_restat = 1; /* mtime (and size) may have changed */ >- > /* > * Are we done already? > */ >@@ -2788,7 +2513,6 @@ static ssize_t vbsf_reg_write_iter_locki > } > } > } while (cbToWrite > 0); >- > vbsf_iter_cleanup_stash(iter, &Stash); > } > else >@@ -2800,8 +2524,6 @@ static ssize_t vbsf_reg_write_iter_locki > SFLOGFLOW(("vbsf_reg_write_iter_locking: returns %#zx (%zd)\n", cbRet, cbRet)); > return cbRet; > } >- >- > /** > * Write from I/O vector iterator. > * >@@ -2823,7 +2545,6 @@ static ssize_t vbsf_reg_aio_write(struct > struct inode *inode = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode; > struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode); > struct address_space *mapping = inode->i_mapping; >- > struct vbsf_reg_info *sf_r = kio->ki_filp->private_data; > struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb); > # if RTLNX_VER_MIN(3,16,0) >@@ -2834,26 +2555,20 @@ static ssize_t vbsf_reg_aio_write(struct > # else > bool const fAppend = RT_BOOL(kio->ki_filp->f_flags & O_APPEND); > # endif >- >- > SFLOGFLOW(("vbsf_reg_write_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n", > inode, kio->ki_filp, cbToWrite, offFile, VBSF_GET_ITER_TYPE(iter) )); > AssertReturn(S_ISREG(inode->i_mode), -EINVAL); >- > /* > * Enforce APPEND flag (more later). > */ > if (fAppend) > kio->ki_pos = offFile = i_size_read(inode); >- > /* > * Do we have anything at all to do here? > */ > if (!cbToWrite) > return 0; >- > /** @todo Implement the read-write caching mode. */ >- > /* > * Now now we reject async I/O requests. > */ >@@ -2861,7 +2576,6 @@ static ssize_t vbsf_reg_aio_write(struct > SFLOGFLOW(("vbsf_reg_write_iter: async I/O not yet supported\n")); /** @todo extend FsPerf with AIO tests. 
*/ > return -EOPNOTSUPP; > } >- > /* > * If there are active writable mappings, coordinate with any > * pending writes via those. >@@ -2877,7 +2591,6 @@ static ssize_t vbsf_reg_aio_write(struct > /** @todo ... */ > # endif > } >- > /* > * For small requests, try use an embedded buffer provided we get a heap block > * that does not cross page boundraries (see host code). >@@ -2897,14 +2610,12 @@ static ssize_t vbsf_reg_aio_write(struct > AssertStmt(cbRet <= (ssize_t)cbToWrite, cbRet = cbToWrite); > vbsf_reg_write_sync_page_cache(mapping, offFile, (uint32_t)cbRet, pReq->abData, > NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/); >- > offFile += cbRet; > if (fAppend && (g_fSfFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET)) > offFile = pReq->Parms.off64Write.u.value64; > kio->ki_pos = offFile; > if (offFile > i_size_read(inode)) > i_size_write(inode, offFile); >- > # if RTLNX_VER_MIN(4,11,0) > if ((size_t)cbRet < cbToWrite) > iov_iter_revert(iter, cbToWrite - cbRet); >@@ -2921,15 +2632,12 @@ static ssize_t vbsf_reg_aio_write(struct > VbglR0PhysHeapFree(pReq); > } > } >- > /* > * Otherwise do the page locking thing. > */ > return vbsf_reg_write_iter_locking(kio, iter, cbToWrite, offFile, pSuperInfo, sf_r, inode, sf_i, mapping, fAppend); > } >- > #endif /* >= 2.6.19 */ >- > /** > * Used by vbsf_reg_open() and vbsf_inode_atomic_open() to > * >@@ -2941,7 +2649,6 @@ static ssize_t vbsf_reg_aio_write(struct > uint32_t vbsf_linux_oflags_to_vbox(unsigned fLnxOpen, uint32_t *pfHandle, const char *pszCaller) > { > uint32_t fVBoxFlags = SHFL_CF_ACCESS_DENYNONE; >- > /* > * Disposition. > */ >@@ -2963,7 +2670,6 @@ uint32_t vbsf_linux_oflags_to_vbox(unsig > fVBoxFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS; > } > } >- > /* > * Access. > */ >@@ -2972,27 +2678,22 @@ uint32_t vbsf_linux_oflags_to_vbox(unsig > fVBoxFlags |= SHFL_CF_ACCESS_READ; > *pfHandle |= VBSF_HANDLE_F_READ; > break; >- > case O_WRONLY: > fVBoxFlags |= SHFL_CF_ACCESS_WRITE; > *pfHandle |= VBSF_HANDLE_F_WRITE; > break; >- > case O_RDWR: > fVBoxFlags |= SHFL_CF_ACCESS_READWRITE; > *pfHandle |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE; > break; >- > default: > BUG(); > } >- > if (fLnxOpen & O_APPEND) { > Log(("%s: O_APPEND set\n", pszCaller)); > fVBoxFlags |= SHFL_CF_ACCESS_APPEND; > *pfHandle |= VBSF_HANDLE_F_APPEND; > } >- > /* > * Only directories? > */ >@@ -3000,11 +2701,8 @@ uint32_t vbsf_linux_oflags_to_vbox(unsig > Log(("%s: O_DIRECTORY set\n", pszCaller)); > fVBoxFlags |= SHFL_CF_DIRECTORY; > } >- > return fVBoxFlags; > } >- >- > /** > * Open a regular file. > * >@@ -3020,22 +2718,18 @@ static int vbsf_reg_open(struct inode *i > struct dentry *dentry = VBSF_GET_F_DENTRY(file); > struct vbsf_reg_info *sf_r; > VBOXSFCREATEREQ *pReq; >- > SFLOGFLOW(("vbsf_reg_open: inode=%p file=%p flags=%#x %s\n", inode, file, file->f_flags, sf_i ? sf_i->path->String.ach : NULL)); > Assert(pSuperInfo); > Assert(sf_i); >- > sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL); > if (!sf_r) { > LogRelFunc(("could not allocate reg info\n")); > return -ENOMEM; > } >- > RTListInit(&sf_r->Handle.Entry); > sf_r->Handle.cRefs = 1; > sf_r->Handle.fFlags = VBSF_HANDLE_F_FILE | VBSF_HANDLE_F_MAGIC; > sf_r->Handle.hHost = SHFL_HANDLE_NIL; >- > /* Already open? 
*/ > if (sf_i->handle != SHFL_HANDLE_NIL) { > /* >@@ -3047,13 +2741,11 @@ static int vbsf_reg_open(struct inode *i > sf_r->Handle.hHost = sf_i->handle; > sf_i->handle = SHFL_HANDLE_NIL; > file->private_data = sf_r; >- > sf_r->Handle.fFlags |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE; /** @todo fix */ > vbsf_handle_append(sf_i, &sf_r->Handle); > SFLOGFLOW(("vbsf_reg_open: returns 0 (#1) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost)); > return 0; > } >- > pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq) + sf_i->path->u16Size); > if (!pReq) { > kfree(sf_r); >@@ -3063,14 +2755,12 @@ static int vbsf_reg_open(struct inode *i > memcpy(&pReq->StrPath, sf_i->path, SHFLSTRING_HEADER_SIZE + sf_i->path->u16Size); > RT_ZERO(pReq->CreateParms); > pReq->CreateParms.Handle = SHFL_HANDLE_NIL; >- > /* We check the value of pReq->CreateParms.Handle afterwards to > * find out if the call succeeded or failed, as the API does not seem > * to cleanly distinguish error and informational messages. > * > * Furthermore, we must set pReq->CreateParms.Handle to SHFL_HANDLE_NIL > * to make the shared folders host service use our fMode parameter */ >- > /* We ignore O_EXCL, as the Linux kernel seems to call create > beforehand itself, so O_EXCL should always fail. */ > pReq->CreateParms.CreateFlags = vbsf_linux_oflags_to_vbox(file->f_flags & ~O_EXCL, &sf_r->Handle.fFlags, __FUNCTION__); >@@ -3084,7 +2774,6 @@ static int vbsf_reg_open(struct inode *i > VbglR0PhysHeapFree(pReq); > return -RTErrConvertToErrno(rc); > } >- > if (pReq->CreateParms.Handle != SHFL_HANDLE_NIL) { > vbsf_dentry_chain_increase_ttl(dentry); > vbsf_update_inode(inode, sf_i, &pReq->CreateParms.Info, pSuperInfo, false /*fInodeLocked*/, 0 /*fSetAttrs*/); >@@ -3111,7 +2800,6 @@ static int vbsf_reg_open(struct inode *i > break; > } > } >- > sf_r->Handle.hHost = pReq->CreateParms.Handle; > file->private_data = sf_r; > vbsf_handle_append(sf_i, &sf_r->Handle); >@@ -3119,8 +2807,6 @@ static int vbsf_reg_open(struct inode *i > SFLOGFLOW(("vbsf_reg_open: returns 0 (#2) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost)); > return rc_linux; > } >- >- > /** > * Close a regular file. > * >@@ -3132,13 +2818,11 @@ static int vbsf_reg_release(struct inode > { > struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode); > struct vbsf_reg_info *sf_r = file->private_data; >- > SFLOGFLOW(("vbsf_reg_release: inode=%p file=%p\n", inode, file)); > if (sf_r) { > struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb); > struct address_space *mapping = inode->i_mapping; > Assert(pSuperInfo); >- > /* If we're closing the last handle for this inode, make sure the flush > the mapping or we'll end up in vbsf_writepage without a handle. */ > if ( mapping >@@ -3152,17 +2836,13 @@ static int vbsf_reg_release(struct inode > #endif > filemap_fdatawait(inode->i_mapping); > } >- > /* Release sf_r, closing the handle if we're the last user. */ > file->private_data = NULL; > vbsf_handle_release(&sf_r->Handle, pSuperInfo, "vbsf_reg_release"); >- > sf_i->handle = SHFL_HANDLE_NIL; > } > return 0; > } >- >- > /** > * Wrapper around generic/default seek function that ensures that we've got > * the up-to-date file size when doing anything relative to EOF. 
>@@ -3175,7 +2855,6 @@ static int vbsf_reg_release(struct inode > static loff_t vbsf_reg_llseek(struct file *file, loff_t off, int whence) > { > SFLOGFLOW(("vbsf_reg_llseek: file=%p off=%lld whence=%d\n", file, off, whence)); >- > switch (whence) { > #ifdef SEEK_HOLE > case SEEK_HOLE: >@@ -3190,15 +2869,12 @@ static loff_t vbsf_reg_llseek(struct fil > return rc; > } > } >- > #if RTLNX_VER_MIN(2,4,8) > return generic_file_llseek(file, off, whence); > #else > return default_llseek(file, off, whence); > #endif > } >- >- > /** > * Flush region of file - chiefly mmap/msync. > * >@@ -3231,9 +2907,7 @@ static int vbsf_reg_fsync(struct file *f > int rc; > struct inode *inode = dentry->d_inode; > AssertReturn(inode, -EINVAL); >- > /** @todo What about file_fsync()? (<= 2.5.11) */ >- > # if RTLNX_VER_MIN(2,5,12) > rc = sync_mapping_buffers(inode->i_mapping); > if ( rc == 0 >@@ -3259,14 +2933,11 @@ static int vbsf_reg_fsync(struct file *f > if (rc == 0 && datasync) > rc = fsync_inode_data_buffers(inode); > # endif >- > # endif /* < 2.5.12 */ > return rc; > # endif > } > #endif /* < 2.6.35 */ >- >- > #if RTLNX_VER_MIN(4,5,0) > /** > * Copy a datablock from one file to another on the host side. >@@ -3285,7 +2956,6 @@ static ssize_t vbsf_reg_copy_file_range( > struct vbsf_super_info *pSuperInfoDst = VBSF_GET_SUPER_INFO(pInodeDst->i_sb); > struct vbsf_reg_info *pFileInfoDst = (struct vbsf_reg_info *)pFileDst->private_data; > VBOXSFCOPYFILEPARTREQ *pReq; >- > /* > * Some extra validation. > */ >@@ -3293,12 +2963,10 @@ static ssize_t vbsf_reg_copy_file_range( > Assert(pInodeInfoSrc->u32Magic == SF_INODE_INFO_MAGIC); > AssertPtrReturn(pInodeInfoDst, -EOPNOTSUPP); > Assert(pInodeInfoDst->u32Magic == SF_INODE_INFO_MAGIC); >- > # if RTLNX_VER_MAX(4,11,0) > if (!S_ISREG(pInodeSrc->i_mode) || !S_ISREG(pInodeDst->i_mode)) > return S_ISDIR(pInodeSrc->i_mode) || S_ISDIR(pInodeDst->i_mode) ? -EISDIR : -EINVAL; > # endif >- > /* > * Allocate the request and issue it. > */ >@@ -3313,7 +2981,6 @@ static ssize_t vbsf_reg_copy_file_range( > cbRet = -EOPNOTSUPP; > else > cbRet = -RTErrConvertToErrno(vrc); >- > VbglR0PhysHeapFree(pReq); > } else > cbRet = -ENOMEM; >@@ -3324,19 +2991,14 @@ static ssize_t vbsf_reg_copy_file_range( > return cbRet; > } > #endif /* > 4.5 */ >- >- > #ifdef SFLOG_ENABLED > /* > * This is just for logging page faults and such. > */ >- > /** Pointer to the ops generic_file_mmap returns the first time it's called. */ > static struct vm_operations_struct const *g_pGenericFileVmOps = NULL; > /** Merge of g_LoggingVmOpsTemplate and g_pGenericFileVmOps. 
*/ > static struct vm_operations_struct g_LoggingVmOps; >- >- > /* Generic page fault callback: */ > # if RTLNX_VER_MIN(4,11,0) > static vm_fault_t vbsf_vmlog_fault(struct vm_fault *vmf) >@@ -3361,8 +3023,6 @@ static int vbsf_vmlog_fault(struct vm_ar > return rc; > } > # endif >- >- > /* Special/generic page fault handler: */ > # if RTLNX_VER_MIN(2,6,26) > # elif RTLNX_VER_MIN(2,6,1) >@@ -3384,8 +3044,6 @@ static struct page *vbsf_vmlog_nopage(st > return page; > } > # endif /* < 2.6.26 */ >- >- > /* Special page fault callback for making something writable: */ > # if RTLNX_VER_MIN(4,11,0) > static vm_fault_t vbsf_vmlog_page_mkwrite(struct vm_fault *vmf) >@@ -3419,8 +3077,6 @@ static int vbsf_vmlog_page_mkwrite(struc > return rc; > } > # endif >- >- > /* Special page fault callback for mapping pages: */ > # if RTLNX_VER_MIN(5,12,0) > static vm_fault_t vbsf_vmlog_map_pages(struct vm_fault *vmf, pgoff_t start, pgoff_t end) >@@ -3453,8 +3109,6 @@ static void vbsf_vmlog_map_pages(struct > SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n")); > } > # endif >- >- > /** Overload template. */ > static struct vm_operations_struct const g_LoggingVmOpsTemplate = { > # if RTLNX_VER_MIN(2,6,23) >@@ -3470,7 +3124,6 @@ static struct vm_operations_struct const > .map_pages = vbsf_vmlog_map_pages, > # endif > }; >- > /** file_operations::mmap wrapper for logging purposes. */ > extern int vbsf_reg_mmap(struct file *file, struct vm_area_struct *vma) > { >@@ -3500,10 +3153,7 @@ extern int vbsf_reg_mmap(struct file *fi > SFLOGFLOW(("vbsf_reg_mmap: returns %d\n", rc)); > return rc; > } >- > #endif /* SFLOG_ENABLED */ >- >- > /** > * File operations for regular files. > * >@@ -3555,8 +3205,6 @@ struct file_operations vbsf_reg_fops = { > .copy_file_range = vbsf_reg_copy_file_range, > #endif > }; >- >- > /** > * Inodes operations for regular files. > */ >@@ -3568,13 +3216,9 @@ struct inode_operations vbsf_reg_iops = > #endif > .setattr = vbsf_inode_setattr, > }; >- >- >- > /********************************************************************************************************************************* > * Address Space Operations on Regular Files (for mmap, sendfile, direct I/O) * > *********************************************************************************************************************************/ >- > /** > * Used to read the content of a page into the page cache. 
> * >@@ -3585,15 +3229,12 @@ static int vbsf_readpage(struct file *fi > { > struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode; > int err; >- > SFLOGFLOW(("vbsf_readpage: inode=%p file=%p page=%p off=%#llx\n", inode, file, page, (uint64_t)page->index << PAGE_SHIFT)); > Assert(PageLocked(page)); >- > if (PageUptodate(page)) { > unlock_page(page); > return 0; > } >- > if (!is_bad_inode(inode)) { > VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq)); > if (pReq) { >@@ -3601,7 +3242,6 @@ static int vbsf_readpage(struct file *fi > struct vbsf_reg_info *sf_r = file->private_data; > uint32_t cbRead; > int vrc; >- > pReq->PgLst.offFirstPage = 0; > pReq->PgLst.aPages[0] = page_to_phys(page); > vrc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, >@@ -3610,11 +3250,9 @@ static int vbsf_readpage(struct file *fi > (uint64_t)page->index << PAGE_SHIFT, > PAGE_SIZE, > 1 /*cPages*/); >- > cbRead = pReq->Parms.cb32Read.u.value32; > AssertStmt(cbRead <= PAGE_SIZE, cbRead = PAGE_SIZE); > VbglR0PhysHeapFree(pReq); >- > if (RT_SUCCESS(vrc)) { > if (cbRead == PAGE_SIZE) { > /* likely */ >@@ -3624,7 +3262,6 @@ static int vbsf_readpage(struct file *fi > kunmap(page); > /** @todo truncate the inode file size? */ > } >- > flush_dcache_page(page); > SetPageUptodate(page); > unlock_page(page); >@@ -3639,8 +3276,6 @@ static int vbsf_readpage(struct file *fi > unlock_page(page); > return err; > } >- >- > /** > * Used to write out the content of a dirty page cache page to the host file. > * >@@ -3658,10 +3293,8 @@ static int vbsf_writepage(struct page *p > struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode); > struct vbsf_handle *pHandle = vbsf_handle_find(sf_i, VBSF_HANDLE_F_WRITE, VBSF_HANDLE_F_APPEND); > int err; >- > SFLOGFLOW(("vbsf_writepage: inode=%p page=%p off=%#llx pHandle=%p (%#llx)\n", > inode, page, (uint64_t)page->index << PAGE_SHIFT, pHandle, pHandle ? pHandle->hHost : 0)); >- > if (pHandle) { > struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb); > VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq)); >@@ -3671,7 +3304,6 @@ static int vbsf_writepage(struct page *p > uint32_t const cbToWrite = page->index != (cbFile >> PAGE_SHIFT) ? PAGE_SIZE > : (uint32_t)cbFile & (uint32_t)PAGE_OFFSET_MASK; > int vrc; >- > pReq->PgLst.offFirstPage = 0; > pReq->PgLst.aPages[0] = page_to_phys(page); > vrc = VbglR0SfHostReqWritePgLst(pSuperInfo->map.root, >@@ -3685,7 +3317,6 @@ static int vbsf_writepage(struct page *p > ("%#x vs %#x\n", pReq->Parms.cb32Write, cbToWrite), > vrc = VERR_WRITE_ERROR); > VbglR0PhysHeapFree(pReq); >- > if (RT_SUCCESS(vrc)) { > /* Update the inode if we've extended the file. */ > /** @todo is this necessary given the cbToWrite calc above? */ >@@ -3693,17 +3324,14 @@ static int vbsf_writepage(struct page *p > if ( offEndOfWrite > cbFile > && offEndOfWrite > i_size_read(inode)) > i_size_write(inode, offEndOfWrite); >- > /* Update and unlock the page. */ > if (PageError(page)) > ClearPageError(page); > SetPageUptodate(page); > unlock_page(page); >- > vbsf_handle_release(pHandle, pSuperInfo, "vbsf_writepage"); > return 0; > } >- > /* > * We failed. > */ >@@ -3722,8 +3350,6 @@ static int vbsf_writepage(struct page *p > unlock_page(page); > return err; > } >- >- > #if RTLNX_VER_MIN(2,6,24) > /** > * Called when writing thru the page cache (which we shouldn't be doing). 
>@@ -3748,7 +3374,6 @@ int vbsf_write_begin(struct file *file, > return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata); > } > #endif /* KERNEL_VERSION >= 2.6.24 */ >- > #if RTLNX_VER_MIN(5,14,0) > /** > * Companion to vbsf_write_begin (i.e. shouldn't be called). >@@ -3771,14 +3396,10 @@ static int vbsf_write_end(struct file *f > return -ENOTSUPP; > } > #endif /* KERNEL_VERSION >= 5.14.0 */ >- >- > #if RTLNX_VER_MIN(2,4,10) >- > # ifdef VBOX_UEK > # undef iov_iter /* HACK ALERT! Don't put anything needing vbsf_iov_iter after this fun! */ > # endif >- > /** > * This is needed to make open accept O_DIRECT as well as dealing with direct > * I/O requests if we don't intercept them earlier. >@@ -3811,9 +3432,7 @@ static int vbsf_direct_IO(int rw, struct > TRACE(); > return -EINVAL; > } >- > #endif >- > /** > * Address space (for the page cache) operations for regular files. > * >@@ -3823,7 +3442,9 @@ struct address_space_operations vbsf_reg > .readpage = vbsf_readpage, > .writepage = vbsf_writepage, > /** @todo Need .writepages if we want msync performance... */ >-#if RTLNX_VER_MIN(2,5,12) >+#if RTLNX_VER_MIN(5,18,0) >+ .dirty_folio = filemap_dirty_folio, >+#elif RTLNX_VER_MIN(2,5,12) > .set_page_dirty = __set_page_dirty_buffers, > #endif > #if RTLNX_VER_MIN(5,14,0) >@@ -3840,4 +3461,3 @@ struct address_space_operations vbsf_reg > .direct_IO = vbsf_direct_IO, > #endif > }; >-
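The functional change of the backport is the final hunk in regops.c, re-typeset here for readability (the bulk of the diff above is blank-line, $Id$ and copyright-year churn from re-syncing with upstream). On kernels 5.18 and newer the driver now registers .dirty_folio instead of the removed .set_page_dirty:

    struct address_space_operations vbsf_reg_aops = {
        .readpage  = vbsf_readpage,
        .writepage = vbsf_writepage,
        /** @todo Need .writepages if we want msync performance... */
    #if RTLNX_VER_MIN(5,18,0)
        .dirty_folio = filemap_dirty_folio,
    #elif RTLNX_VER_MIN(2,5,12)
        .set_page_dirty = __set_page_dirty_buffers,
    #endif
        /* remaining members unchanged by this patch */
    };

filemap_dirty_folio() is the stock folio-based dirtying helper the kernel provides for this hook; behaviour on pre-5.18 kernels is unchanged.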