diff -r -U8 kernel_/common/inc/nv-linux.h kernel/common/inc/nv-linux.h --- kernel_/common/inc/nv-linux.h 2016-09-02 03:32:09.000000000 +0200 +++ kernel/common/inc/nv-linux.h 2016-09-12 21:18:48.100329700 +0200 @@ -1307,16 +1307,19 @@ #else #define NV_KMEM_CACHE_CREATE_FULL(name, size, align, flags, ctor) \ kmem_cache_create(name, size, align, flags, ctor, NULL) #endif #define NV_KMEM_CACHE_CREATE(name, type) \ NV_KMEM_CACHE_CREATE_FULL(name, sizeof(type), 0, 0, NULL) +#define NV_KMEM_CACHE_CREATE_USERCOPY(name, type) \ + NV_KMEM_CACHE_CREATE_FULL(name, sizeof(type), 0, SLAB_USERCOPY, NULL) + #define NV_KMEM_CACHE_DESTROY(kmem_cache) \ kmem_cache_destroy(kmem_cache) #define NV_KMEM_CACHE_ALLOC(kmem_cache) \ kmem_cache_alloc(kmem_cache, GFP_KERNEL) #define NV_KMEM_CACHE_FREE(ptr, kmem_cache) \ kmem_cache_free(kmem_cache, ptr) diff -r -U8 kernel_/common/inc/nv-modeset-interface.h kernel/common/inc/nv-modeset-interface.h --- kernel_/common/inc/nv-modeset-interface.h 2016-09-02 03:32:09.000000000 +0200 +++ kernel/common/inc/nv-modeset-interface.h 2016-09-13 01:57:32.351888874 +0200 @@ -67,17 +67,17 @@ typedef struct { /* * The nvidia-modeset kernel module should assign version_string * before passing the structure to the nvidia kernel module, so * that a version match can be confirmed: it is not supported to * mix nvidia and nvidia-modeset kernel modules from different * releases. */ - const char *version_string; + // const char *version_string; /* * Return system information. 
*/ struct { /* Availability of write combining support for video memory */ NvBool allow_write_combining; } system_info; @@ -112,11 +112,11 @@ void NV_MODESET_INTERFACE_API_CALL (*op) (nvidia_modeset_stack_ptr sp, void *ops_cmd); int (*set_callbacks)(const nvidia_modeset_callbacks_t *cb); } nvidia_modeset_rm_ops_t; -NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops); +NV_STATUS nvidia_get_rm_ops(const nvidia_modeset_rm_ops_t **rm_ops, const char **version_string); #endif /* _NV_MODESET_INTERFACE_H_ */ diff -r -U8 kernel_/common/inc/nv-register-module.h kernel/common/inc/nv-register-module.h --- kernel_/common/inc/nv-register-module.h 2016-09-02 03:32:09.000000000 +0200 +++ kernel/common/inc/nv-register-module.h 2016-09-12 21:17:30.880331735 +0200 @@ -29,14 +29,14 @@ /* file operations */ int (*open)(struct inode *, struct file *filp); int (*close)(struct inode *, struct file *filp); int (*mmap)(struct file *filp, struct vm_area_struct *vma); int (*ioctl)(struct inode *, struct file * file, unsigned int cmd, unsigned long arg); unsigned int (*poll)(struct file * file, poll_table *wait); -} nvidia_module_t; +} __do_const nvidia_module_t; int nvidia_register_module(nvidia_module_t *); int nvidia_unregister_module(nvidia_module_t *); #endif diff -r -U8 kernel_/nvidia/nv.c kernel/nvidia/nv.c --- kernel_/nvidia/nv.c 2016-09-02 03:32:09.000000000 +0200 +++ kernel/nvidia/nv.c 2016-09-12 21:19:25.868328704 +0200 @@ -655,18 +655,18 @@ #if defined(VM_CHECKER) NV_SPIN_LOCK_INIT(&vm_lock); #endif #if defined(KM_CHECKER) NV_SPIN_LOCK_INIT(&km_lock); #endif - nvidia_stack_t_cache = NV_KMEM_CACHE_CREATE(nvidia_stack_cache_name, - nvidia_stack_t); + nvidia_stack_t_cache = NV_KMEM_CACHE_CREATE_USERCOPY(nvidia_stack_cache_name, + nvidia_stack_t); if (nvidia_stack_t_cache == NULL) { nv_printf(NV_DBG_ERRORS, "NVRM: stack cache allocation failed!\n"); rc = -ENOMEM; goto failed5; } rc = nv_kmem_cache_alloc_stack(&sp); diff -r -U8 kernel_/nvidia/nv-chrdev.c 
kernel/nvidia/nv-chrdev.c --- kernel_/nvidia/nv-chrdev.c 2016-09-02 03:32:09.000000000 +0200 +++ kernel/nvidia/nv-chrdev.c 2016-09-12 21:17:03.204332465 +0200 @@ -15,18 +15,16 @@ #include "nv-linux.h" #include "nv-frontend.h" #include "nv-instance.h" int nv_register_chrdev(void *param) { nvidia_module_t *module = (nvidia_module_t *)param; - module->instance = nv_module_instance; - return (nvidia_register_module(module)); } void nv_unregister_chrdev(void *param) { nvidia_module_t *module = (nvidia_module_t *)param; nvidia_unregister_module(module); Binary files kernel_/nvidia/nv-chrdev.o and kernel/nvidia/nv-chrdev.o differ diff -r -U8 kernel_/nvidia/nv-instance.c kernel/nvidia/nv-instance.c --- kernel_/nvidia/nv-instance.c 2016-09-02 03:32:09.000000000 +0200 +++ kernel/nvidia/nv-instance.c 2016-09-12 21:16:42.268333017 +0200 @@ -46,16 +46,17 @@ .err_handler = &nv_pci_error_handlers, #endif }; /* character device entry points*/ nvidia_module_t nv_fops = { .owner = THIS_MODULE, .module_name = MODULE_NAME, + .instance = MODULE_INSTANCE_NUMBER, .open = nvidia_open, .close = nvidia_close, .ioctl = nvidia_ioctl, .mmap = nvidia_mmap, .poll = nvidia_poll, }; /* diff -r -U8 kernel_/nvidia/nv-mmap.c kernel/nvidia/nv-mmap.c --- kernel_/nvidia/nv-mmap.c 2016-09-02 03:32:09.000000000 +0200 +++ kernel/nvidia/nv-mmap.c 2016-09-13 02:18:54.459855079 +0200 @@ -108,22 +108,22 @@ nvfp = NV_GET_FILE_PRIVATE(NV_VMA_FILE(vma)); at->next = nvfp->free_list; nvfp->free_list = at; } } } #if defined(NV_VM_OPERATIONS_STRUCT_HAS_ACCESS) -static int +static ssize_t nvidia_vma_access( struct vm_area_struct *vma, unsigned long addr, void *buffer, - int length, + size_t length, int write ) { nv_alloc_t *at = NULL; nv_file_private_t *nvfp = NV_GET_FILE_PRIVATE(NV_VMA_FILE(vma)); nv_state_t *nv = NV_STATE_PTR(nvfp->nvptr); NvU32 pageIndex, pageOffset; void *kernel_mapping; diff -r -U8 kernel_/nvidia/nv-modeset-interface.c kernel/nvidia/nv-modeset-interface.c --- kernel_/nvidia/nv-modeset-interface.c 
2016-09-02 03:32:09.000000000 +0200 +++ kernel/nvidia/nv-modeset-interface.c 2016-09-13 02:01:46.519882174 +0200 @@ -95,40 +95,39 @@ count++; } UNLOCK_NV_LINUX_DEVICES(); return count; } -NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops) +NV_STATUS nvidia_get_rm_ops(const nvidia_modeset_rm_ops_t **rm_ops, const char **version_string) { - const nvidia_modeset_rm_ops_t local_rm_ops = { - .version_string = NV_VERSION_STRING, + static nvidia_modeset_rm_ops_t local_rm_ops = { .system_info = { .allow_write_combining = NV_FALSE, }, .alloc_stack = nvidia_modeset_rm_ops_alloc_stack, .free_stack = nvidia_modeset_rm_ops_free_stack, .enumerate_gpus = nvidia_modeset_enumerate_gpus, .open_gpu = nvidia_dev_get, .close_gpu = nvidia_dev_put, .op = rm_kernel_rmapi_op, /* provided by nv-kernel.o */ .set_callbacks = nvidia_modeset_set_callbacks, }; - if (strcmp(rm_ops->version_string, NV_VERSION_STRING) != 0) + if (strcmp(*version_string, NV_VERSION_STRING) != 0) { - rm_ops->version_string = NV_VERSION_STRING; + *version_string = NV_VERSION_STRING; return NV_ERR_GENERIC; } - *rm_ops = local_rm_ops; + *rm_ops = (const nvidia_modeset_rm_ops_t *) &local_rm_ops; if (NV_ALLOW_WRITE_COMBINING(NV_MEMORY_TYPE_FRAMEBUFFER)) { - rm_ops->system_info.allow_write_combining = NV_TRUE; + local_rm_ops.system_info.allow_write_combining = NV_TRUE; } return NV_OK; } EXPORT_SYMBOL(nvidia_get_rm_ops); diff -r -U8 kernel_/nvidia-drm/nvidia-drm-drv.c kernel/nvidia-drm/nvidia-drm-drv.c --- kernel_/nvidia-drm/nvidia-drm-drv.c 2016-09-02 03:33:06.000000000 +0200 +++ kernel/nvidia-drm/nvidia-drm-drv.c 2016-09-12 21:23:12.280322736 +0200 @@ -607,17 +607,17 @@ DRM_IOCTL_DEF_DRV(NVIDIA_GEM_PRIME_FENCE_FORCE_SIGNAL, nvidia_drm_gem_prime_fence_force_signal, DRM_CONTROL_ALLOW|DRM_RENDER_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(NVIDIA_GEM_PRIME_FENCE_FINI, nvidia_drm_gem_prime_fence_fini, DRM_CONTROL_ALLOW|DRM_RENDER_ALLOW|DRM_UNLOCKED), }; -static struct drm_driver nv_drm_driver = { +static 
drm_driver_no_const nv_drm_driver __read_only = { .driver_features = DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER, .gem_free_object = nvidia_drm_gem_free, .ioctls = nv_drm_ioctls, .num_ioctls = ARRAY_SIZE(nv_drm_ioctls), @@ -668,24 +668,26 @@ { #if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) if (!nvidia_drm_modeset_module_param) { return; } + pax_open_kernel(); nv_drm_driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC; nv_drm_driver.master_set = nvidia_drm_master_set; nv_drm_driver.master_drop = nvidia_drm_master_drop; nv_drm_driver.dumb_create = nvidia_drm_dumb_create; nv_drm_driver.dumb_map_offset = nvidia_drm_dumb_map_offset; nv_drm_driver.dumb_destroy = drm_gem_dumb_destroy; + pax_close_kernel(); #endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ } /* * Helper function for allocate/register DRM device for given NVIDIA GPU ID. diff -r -U8 kernel_/nvidia-modeset/nvidia-modeset-linux.c kernel/nvidia-modeset/nvidia-modeset-linux.c --- kernel_/nvidia-modeset/nvidia-modeset-linux.c 2016-09-02 03:33:06.000000000 +0200 +++ kernel/nvidia-modeset/nvidia-modeset-linux.c 2016-09-13 02:14:23.111862232 +0200 @@ -317,63 +317,63 @@ up(&nvkms_lock); } /************************************************************************* * Interface with resman. 
*************************************************************************/ -static nvidia_modeset_rm_ops_t __rm_ops = { 0 }; +static const nvidia_modeset_rm_ops_t *__rm_ops; + static nvidia_modeset_callbacks_t nvkms_rm_callbacks = { - nvkms_suspend, - nvkms_resume + .suspend = nvkms_suspend, + .resume = nvkms_resume }; static int nvkms_alloc_rm(void) { NV_STATUS nvstatus; int ret; + const char * version_string = NV_VERSION_STRING; - __rm_ops.version_string = NV_VERSION_STRING; - - nvstatus = nvidia_get_rm_ops(&__rm_ops); + nvstatus = nvidia_get_rm_ops(&__rm_ops, &version_string); if (nvstatus != NV_OK) { printk(KERN_ERR NVKMS_LOG_PREFIX "Version mismatch: " "nvidia.ko(%s) nvidia-modeset.ko(%s)\n", - __rm_ops.version_string, NV_VERSION_STRING); + version_string, NV_VERSION_STRING); return -EINVAL; } - ret = __rm_ops.set_callbacks(&nvkms_rm_callbacks); + ret = __rm_ops->set_callbacks(&nvkms_rm_callbacks); if (ret < 0) { printk(KERN_ERR NVKMS_LOG_PREFIX "Failed to register callbacks\n"); return ret; } return 0; } static void nvkms_free_rm(void) { - __rm_ops.set_callbacks(NULL); + __rm_ops->set_callbacks(NULL); } void NVKMS_API_CALL nvkms_call_rm(void *ops) { nvidia_modeset_stack_ptr stack = NULL; - if (__rm_ops.alloc_stack(&stack) != 0) { + if (__rm_ops->alloc_stack(&stack) != 0) { return; } - __rm_ops.op(stack, ops); + __rm_ops->op(stack, ops); - __rm_ops.free_stack(stack); + __rm_ops->free_stack(stack); } /************************************************************************* * ref_ptr implementation. 
*************************************************************************/ struct nvkms_ref_ptr { struct kref refcnt; @@ -685,48 +685,48 @@ return data; } NvBool NVKMS_API_CALL nvkms_open_gpu(NvU32 gpuId) { nvidia_modeset_stack_ptr stack = NULL; NvBool ret; - if (__rm_ops.alloc_stack(&stack) != 0) { + if (__rm_ops->alloc_stack(&stack) != 0) { return NV_FALSE; } - ret = __rm_ops.open_gpu(gpuId, stack) == 0; + ret = __rm_ops->open_gpu(gpuId, stack) == 0; - __rm_ops.free_stack(stack); + __rm_ops->free_stack(stack); return ret; } void NVKMS_API_CALL nvkms_close_gpu(NvU32 gpuId) { nvidia_modeset_stack_ptr stack = NULL; - if (__rm_ops.alloc_stack(&stack) != 0) { + if (__rm_ops->alloc_stack(&stack) != 0) { return; } - __rm_ops.close_gpu(gpuId, stack); + __rm_ops->close_gpu(gpuId, stack); - __rm_ops.free_stack(stack); + __rm_ops->free_stack(stack); } NvU32 NVKMS_API_CALL nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info) { - return __rm_ops.enumerate_gpus(gpu_info); + return __rm_ops->enumerate_gpus(gpu_info); } NvBool NVKMS_API_CALL nvkms_allow_write_combining(void) { - return __rm_ops.system_info.allow_write_combining; + return __rm_ops->system_info.allow_write_combining; } /************************************************************************* * Common to both user-space and kapi NVKMS interfaces *************************************************************************/ static void nvkms_kapi_event_work_queue_callback(NVKMS_WORK_FUNC_ARG_T *arg) { diff -r -U8 kernel_/nvidia-uvm/uvm8_global.c kernel/nvidia-uvm/uvm8_global.c --- kernel_/nvidia-uvm/uvm8_global.c 2016-09-02 03:30:48.000000000 +0200 +++ kernel/nvidia-uvm/uvm8_global.c 2016-09-13 01:43:42.647910744 +0200 @@ -30,27 +30,27 @@ #include "uvm8_thread_context.h" #include "uvm8_va_range.h" #include "uvm8_kvmalloc.h" #include "uvm8_mmu.h" #include "uvm8_perf_heuristics.h" #include "nv_uvm_interface.h" uvm_global_t g_uvm_global; -static struct UvmOpsUvmEvents g_exported_uvm8_ops; +static struct UvmOpsUvmEvents 
g_exported_uvm8_ops = { + .startDevice = NULL, + .stopDevice = NULL, + .isrTopHalf = uvm8_isr_top_half, +}; static bool g_ops_registered = false; static NV_STATUS uvm8_register_callbacks(void) { NV_STATUS status = NV_OK; - g_exported_uvm8_ops.startDevice = NULL; - g_exported_uvm8_ops.stopDevice = NULL; - g_exported_uvm8_ops.isrTopHalf = uvm8_isr_top_half; - // Register the UVM callbacks with the main GPU driver: status = uvm_rm_locked_call(nvUvmInterfaceRegisterUvmCallbacks(&g_exported_uvm8_ops)); if (status != NV_OK) return status; g_ops_registered = true; return NV_OK; } diff -r -U8 kernel_/nvidia-uvm/uvm8_gpu_semaphore.c kernel/nvidia-uvm/uvm8_gpu_semaphore.c --- kernel_/nvidia-uvm/uvm8_gpu_semaphore.c 2016-09-02 03:30:48.000000000 +0200 +++ kernel/nvidia-uvm/uvm8_gpu_semaphore.c 2016-09-13 01:44:14.019909917 +0200 @@ -347,33 +347,33 @@ { NvU32 index = get_index(semaphore); NvU64 base_va = uvm_rm_mem_get_gpu_va(semaphore->page->memory, gpu); return base_va + UVM_SEMAPHORE_SIZE * index; } NvU32 uvm_gpu_semaphore_get_payload(uvm_gpu_semaphore_t *semaphore) { - return ACCESS_ONCE(*semaphore->payload); + return ACCESS_ONCE_RW(*semaphore->payload); } void uvm_gpu_semaphore_set_payload(uvm_gpu_semaphore_t *semaphore, NvU32 payload) { // Provide a guarantee that all memory accesses prior to setting the payload // won't be moved past it. // Use a big hammer mb() as set_payload() is not used in any performance path // today. // This could likely be optimized to be either an smp_store_release() or use // an smp_mb__before_atomic() barrier. The former is a recent addition to // kernel though, and it's not clear whether combining the latter with a // regular 32bit store is well defined in all cases. Both also seem to risk // being optimized out on non-SMP configs (we need them for interacting with // the GPU correctly even on non-SMP). 
mb(); - ACCESS_ONCE(*semaphore->payload) = payload; + ACCESS_ONCE_RW(*semaphore->payload) = payload; } // This function is intended to catch channels which have been left dangling in // trackers after their owning GPUs have been destroyed. static bool tracking_semaphore_check_gpu(uvm_gpu_tracking_semaphore_t *tracking_sem) { uvm_gpu_t *gpu = tracking_sem->semaphore.page->pool->gpu; uvm_gpu_t *table_gpu; diff -r -U8 kernel_/nvidia-uvm/uvm8_hal.h kernel/nvidia-uvm/uvm8_hal.h --- kernel_/nvidia-uvm/uvm8_hal.h 2016-09-02 03:30:48.000000000 +0200 +++ kernel/nvidia-uvm/uvm8_hal.h 2016-09-13 01:44:46.219909068 +0200 @@ -311,17 +311,17 @@ uvm_ce_hal_t ce_ops; // arch_ops: id is an architecture uvm_arch_hal_t arch_ops; // fault_buffer_ops: id is a hardware class uvm_fault_buffer_hal_t fault_buffer_ops; } u; -} uvm_hal_class_ops_t; +} __do_const uvm_hal_class_ops_t; // When UVM next support is enabled support for future chips in the hal is // enabled by providing additional hal table entries below. #if UVM_IS_NEXT() extern uvm_hal_class_ops_t *ce_table_next; extern uvm_hal_class_ops_t *host_table_next; extern uvm_hal_class_ops_t *arch_table_next; extern uvm_hal_class_ops_t *fault_buffer_table_next; diff -r -U8 kernel_/nvidia-uvm/uvm8_mmu.h kernel/nvidia-uvm/uvm8_mmu.h --- kernel_/nvidia-uvm/uvm8_mmu.h 2016-09-02 03:30:48.000000000 +0200 +++ kernel/nvidia-uvm/uvm8_mmu.h 2016-09-13 01:45:09.119908464 +0200 @@ -19,17 +19,16 @@ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*******************************************************************************/ #ifndef __UVM8_MMU_H__ #define __UVM8_MMU_H__ -#include "uvm8_forward_decl.h" #include "uvm8_hal_types.h" #include "uvm8_pmm_gpu.h" #include "uvmtypes.h" #include "uvm_common.h" #include "uvm8_tracker.h" #include "uvm8_test_ioctl.h" // Used when the page size isn't known and should not matter diff -r -U8 kernel_/nvidia-uvm/uvm_common.c kernel/nvidia-uvm/uvm_common.c --- kernel_/nvidia-uvm/uvm_common.c 2016-09-02 03:30:48.000000000 +0200 +++ kernel/nvidia-uvm/uvm_common.c 2016-09-13 02:07:06.967873728 +0200 @@ -46,17 +46,16 @@ #include "uvm8_init.h" #include "uvm8_forward_decl.h" // TODO: Bug 1710855: Tweak this number through benchmarks #define UVM_SPIN_LOOP_SCHEDULE_TIMEOUT_NS (10*1000ULL) #define UVM_SPIN_LOOP_PRINT_TIMEOUT_SEC 30ULL static dev_t g_uvmBaseDev; -struct UvmOpsUvmEvents g_exportedUvmOps; static char* uvm_driver_mode = "8"; // UVM-Lite behavior is activated through insmod argument: (default is "8") // insmod nvidia-uvm.ko uvm_driver_mode="lite" // // Similarly, the UVM-8 production driver can be activated with: // insmod nvidia-uvm.ko uvm_driver_mode="8" @@ -173,18 +172,20 @@ return nvStatus; } static NV_STATUS uvmSetupGpuProvider(void) { NV_STATUS status = NV_OK; - g_exportedUvmOps.startDevice = uvm_gpu_event_start_device; - g_exportedUvmOps.stopDevice = uvm_gpu_event_stop_device; + static struct UvmOpsUvmEvents g_exportedUvmOps = { + .startDevice = uvm_gpu_event_start_device, + .stopDevice = uvm_gpu_event_stop_device, + }; // call RM to exchange the function pointers. 
status = nvUvmInterfaceRegisterUvmCallbacks(&g_exportedUvmOps); return status; } static int uvm_not8_init(dev_t uvmBaseDev) { diff -r -U8 kernel_/nvidia-uvm/uvm_full_fault_buffer.h kernel/nvidia-uvm/uvm_full_fault_buffer.h --- kernel_/nvidia-uvm/uvm_full_fault_buffer.h 2016-09-02 03:30:48.000000000 +0200 +++ kernel/nvidia-uvm/uvm_full_fault_buffer.h 2016-09-13 02:14:43.967861682 +0200 @@ -25,16 +25,17 @@ // uvm_full_fault_buffer.h // // This file contains structures and function declarations to read or update // the fault buffer/related registers and read/mask/unmask the fault intr. // #ifndef _UVM_FULL_FAULT_BUFFER_H_ #define _UVM_FULL_FAULT_BUFFER_H_ +#include <linux/compiler.h> #include "uvmtypes.h" #define MAXWELL_FAULT_BUFFER_A (0xb069) #define MEM_RD32(a) (*(const volatile NvU32 *)(a)) #define MEM_WR32(a, d) do { *(volatile NvU32 *)(a) = (d); } while (0) typedef enum { UvmReplayType_none, @@ -298,17 +299,17 @@ NvUvmSetReplayParamsReg_t setReplayParamsReg; NvUvmGetFaultPacketSize_t getFaultPacketSize; NvUvmWriteFaultBufferPacket_t writeFaultBufferPacket; NvUvmIsFaultInterruptPending_t isFaultIntrPending; NvUvmSetFaultIntrBit_t setFaultIntrBit; NvUvmControlPrefetch_t controlPrefetch; NvUvmTestFaultBufferOverflow_t testFaultBufferOverflow; NvUvmClearFaultBufferOverflow_t clearFaultBufferOverflow; -} UvmFaultBufferOps; +} __no_const UvmFaultBufferOps; /****************************************************************************** uvmfull_fault_buffer_init Initialze fault buffer management related function pointers for a fault class Arguments: faultBufferClass: (INPUT) diff -r -U8 kernel_/nvidia-uvm/uvm_linux.h kernel/nvidia-uvm/uvm_linux.h --- kernel_/nvidia-uvm/uvm_linux.h 2016-09-02 03:30:48.000000000 +0200 +++ kernel/nvidia-uvm/uvm_linux.h 2016-09-13 01:47:41.487904448 +0200 @@ -415,17 +415,17 @@ // Added in 2.6.24 #ifndef ACCESS_ONCE #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) #endif // WRITE_ONCE/READ_ONCE have incompatible definitions across versions, which produces
warnings. // Therefore, we define our own macros -#define UVM_WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val)) +#define UVM_WRITE_ONCE(x, val) (ACCESS_ONCE_RW(x) = (val)) #define UVM_READ_ONCE(x) ACCESS_ONCE(x) // Added in 3.11 #ifndef PAGE_ALIGNED #define PAGE_ALIGNED(addr) (((addr) & (PAGE_SIZE - 1)) == 0) #endif // Added in 2.6.37 via commit e1ca7788dec6773b1a2bce51b7141948f2b8bccf