Gentoo Websites Logo
Go to: Gentoo Home Documentation Forums Lists Bugs Planet Store Wiki Get Gentoo!
View | Details | Raw Unified | Return to bug 751328
Collapse All | Expand All

(-)vboxdrv/r0drv/linux/memobj-r0drv-linux.c (-1 / +60 lines)
Lines 56-64 Link Here
56
 * Whether we use alloc_vm_area (3.2+) for executable memory.
56
 * Whether we use alloc_vm_area (3.2+) for executable memory.
57
 * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
57
 * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
58
 * better W^X compliance (fExecutable flag). */
58
 * better W^X compliance (fExecutable flag). */
59
#if RTLNX_VER_MIN(3,2,0) || defined(DOXYGEN_RUNNING)
59
#if RTLNX_VER_RANGE(3,2,0, 5,10,0) || defined(DOXYGEN_RUNNING)
60
# define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
60
# define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
61
#endif
61
#endif
62
#if RTLNX_VER_MIN(5,10,0) || defined(DOXYGEN_RUNNING)
63
# define IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
64
#endif
62
65
63
/*
66
/*
64
 * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
67
 * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
Lines 502-508 Link Here
502
}
505
}
503
506
504
507
508
#ifdef IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
505
/**
509
/**
510
 * User data passed to the apply_to_page_range() callback.
511
 */
512
typedef struct LNXAPPLYPGRANGE
513
{
514
    /** Pointer to the memory object. */
515
    PRTR0MEMOBJLNX pMemLnx;
516
    /** The page protection flags to apply. */
517
    pgprot_t       fPg;
518
} LNXAPPLYPGRANGE;
519
/** Pointer to the user data. */
520
typedef LNXAPPLYPGRANGE *PLNXAPPLYPGRANGE;
521
/** Pointer to the const user data. */
522
typedef const LNXAPPLYPGRANGE *PCLNXAPPLYPGRANGE;
523
524
/**
525
 * Callback called in apply_to_page_range().
526
 *
527
 * @returns Linux status code.
528
 * @param   pPte                Pointer to the page table entry for the given address.
529
 * @param   uAddr               The address to apply the new protection to.
530
 * @param   pvUser              The opaque user data.
531
 */
532
static DECLCALLBACK(int) rtR0MemObjLinuxApplyPageRange(pte_t *pPte, unsigned long uAddr, void *pvUser)
533
{
534
    PCLNXAPPLYPGRANGE pArgs = (PCLNXAPPLYPGRANGE)pvUser;
535
    PRTR0MEMOBJLNX pMemLnx = pArgs->pMemLnx;
536
    uint32_t idxPg = (uAddr - (unsigned long)pMemLnx->Core.pv) >> PAGE_SHIFT;
537
538
    set_pte(pPte, mk_pte(pMemLnx->apPages[idxPg], pArgs->fPg));
539
    return 0;
540
}
541
#endif
542
543
544
/**
506
 * Maps the allocation into ring-0.
545
 * Maps the allocation into ring-0.
507
 *
546
 *
508
 * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
547
 * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
Lines 584-589 Link Here
584
        else
623
        else
585
# endif
624
# endif
586
        {
625
        {
626
#  if defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
627
            if (fExecutable)
628
                pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. */
629
#  endif
630
587
# ifdef VM_MAP
631
# ifdef VM_MAP
588
            pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
632
            pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
589
# else
633
# else
Lines 1851-1856 Link Here
1851
        preempt_enable();
1895
        preempt_enable();
1852
        return VINF_SUCCESS;
1896
        return VINF_SUCCESS;
1853
    }
1897
    }
1898
# elif defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
1899
    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
1900
    if (   pMemLnx->fExecutable
1901
        && pMemLnx->fMappedToRing0)
1902
    {
1903
        LNXAPPLYPGRANGE Args;
1904
        Args.pMemLnx = pMemLnx;
1905
        Args.fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
1906
        int rcLnx = apply_to_page_range(current->active_mm, (unsigned long)pMemLnx->Core.pv + offSub, cbSub,
1907
                                        rtR0MemObjLinuxApplyPageRange, (void *)&Args);
1908
        if (rcLnx)
1909
            return VERR_NOT_SUPPORTED;
1910
1911
        return VINF_SUCCESS;
1912
    }
1854
# endif
1913
# endif
1855
1914
1856
    NOREF(pMem);
1915
    NOREF(pMem);

Return to bug 751328