diff -dru NVIDIA-Linux-x86_64-378.13.orig/kernel/nvidia-drm/nvidia-drm-fence.c NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-fence.c --- NVIDIA-Linux-x86_64-378.13.orig/kernel/nvidia-drm/nvidia-drm-fence.c 2017-02-08 04:58:37.000000000 +0100 +++ NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-fence.c 2017-04-01 12:33:53.600560312 +0200 @@ -31,7 +31,7 @@ #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) struct nv_fence { - struct fence base; + struct dma_fence base; spinlock_t lock; struct nvidia_drm_device *nv_dev; @@ -51,7 +51,7 @@ static const char *nvidia_drm_gem_prime_fence_op_get_driver_name ( - struct fence *fence + struct dma_fence *fence ) { return "NVIDIA"; @@ -59,7 +59,7 @@ static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name ( - struct fence *fence + struct dma_fence *fence ) { return "nvidia.prime"; @@ -67,7 +67,7 @@ static bool nvidia_drm_gem_prime_fence_op_signaled ( - struct fence *fence + struct dma_fence *fence ) { struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base); @@ -99,7 +99,7 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling ( - struct fence *fence + struct dma_fence *fence ) { bool ret = true; @@ -107,7 +107,7 @@ struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem; struct nvidia_drm_device *nv_dev = nv_fence->nv_dev; - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) { return false; } @@ -136,7 +136,7 @@ } nv_gem->fenceContext.softFence = fence; - fence_get(fence); + dma_fence_get(fence); unlock_struct_mutex: mutex_unlock(&nv_dev->dev->struct_mutex); @@ -146,7 +146,7 @@ static void nvidia_drm_gem_prime_fence_op_release ( - struct fence *fence + struct dma_fence *fence ) { struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base); @@ -155,7 +155,7 @@ static signed long nvidia_drm_gem_prime_fence_op_wait ( - struct fence *fence, + struct dma_fence *fence, bool intr, signed long timeout ) @@ -170,12 +170,12 @@ * that it should never get hit during 
normal operation, but not so long * that the system becomes unresponsive. */ - return fence_default_wait(fence, intr, - (timeout == MAX_SCHEDULE_TIMEOUT) ? - msecs_to_jiffies(96) : timeout); + return dma_fence_default_wait(fence, intr, + (timeout == MAX_SCHEDULE_TIMEOUT) ? + msecs_to_jiffies(96) : timeout); } -static const struct fence_ops nvidia_drm_gem_prime_fence_ops = { +static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = { .get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name, .get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name, .signaled = nvidia_drm_gem_prime_fence_op_signaled, @@ -285,7 +285,7 @@ bool force ) { - struct fence *fence = nv_gem->fenceContext.softFence; + struct dma_fence *fence = nv_gem->fenceContext.softFence; WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex)); @@ -301,10 +301,10 @@ if (force || nv_fence_ready_to_signal(nv_fence)) { - fence_signal(&nv_fence->base); + dma_fence_signal(&nv_fence->base); nv_gem->fenceContext.softFence = NULL; - fence_put(&nv_fence->base); + dma_fence_put(&nv_fence->base); nvKms->disableChannelEvent(nv_dev->pDevice, nv_gem->fenceContext.cb); @@ -320,7 +320,7 @@ nv_fence = container_of(fence, struct nv_fence, base); - fence_signal(&nv_fence->base); + dma_fence_signal(&nv_fence->base); } } @@ -510,10 +510,10 @@ } /* - * fence_context_alloc() cannot fail, so we do not need to check a return - * value. + * dma_fence_context_alloc() cannot fail, so we do not need to check a + * return value. 
*/ - nv_gem->fenceContext.context = fence_context_alloc(1); + nv_gem->fenceContext.context = dma_fence_context_alloc(1); ret = nvidia_drm_gem_prime_fence_import_semaphore( nv_dev, nv_gem, p->index, @@ -670,9 +670,9 @@ nv_fence->nv_gem = nv_gem; spin_lock_init(&nv_fence->lock); - fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops, - &nv_fence->lock, nv_gem->fenceContext.context, - p->sem_thresh); + dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops, + &nv_fence->lock, nv_gem->fenceContext.context, + p->sem_thresh); /* We could be replacing an existing exclusive fence; force signal it to * unblock anyone waiting on it and clean up software signaling. */ @@ -680,7 +680,7 @@ reservation_object_add_excl_fence(&nv_gem->fenceContext.resv, &nv_fence->base); - fence_put(&nv_fence->base); /* Reservation object has reference */ + dma_fence_put(&nv_fence->base); /* Reservation object has reference */ ret = 0; diff -dru NVIDIA-Linux-x86_64-378.13.orig/kernel/nvidia-drm/nvidia-drm-gem.h NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-gem.h --- NVIDIA-Linux-x86_64-378.13.orig/kernel/nvidia-drm/nvidia-drm-gem.h 2017-02-08 04:58:37.000000000 +0100 +++ NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-gem.h 2017-04-01 12:21:10.489444960 +0200 @@ -101,7 +101,7 @@ /* Software signaling structures */ struct NvKmsKapiChannelEvent *cb; struct nvidia_drm_gem_prime_soft_fence_event_args *cbArgs; - struct fence *softFence; /* Fence for software signaling */ + struct dma_fence *softFence; /* Fence for software signaling */ } fenceContext; #endif }; diff -dru NVIDIA-Linux-x86_64-378.13.orig/kernel/nvidia-drm/nvidia-drm-priv.h NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-priv.h --- NVIDIA-Linux-x86_64-378.13.orig/kernel/nvidia-drm/nvidia-drm-priv.h 2017-02-08 04:58:37.000000000 +0100 +++ NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-priv.h 2017-04-01 12:21:31.529112579 +0200 @@ -34,7 +34,7 @@ #endif #if 
defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/reservation.h> #endif