Go to:
Gentoo Home
Documentation
Forums
Lists
Bugs
Planet
Store
Wiki
Get Gentoo!
Gentoo's Bugzilla – Attachment 464510 Details for
Bug 610110
x11-drivers/nvidia-drivers-378.13 fails to compile with kernel 4.10.0: error: ‘CPU_DOWN_FAILED’ undeclared
Home
|
New
–
[Ex]
|
Browse
|
Search
|
Privacy Policy
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
[x]
|
Forgot Password
Login:
[x]
[patch]
Kernel patch for nvidia drivers to work with kernel 4.10.0
nvidia-drivers-378.13-fix.patch (text/plain), 10.15 KB, created by
ganthore
on 2017-02-21 13:19:41 UTC
(hide)
Description:
Kernel patch for nvidia drivers to work with kernel 4.10.0
Filename:
MIME Type:
Creator:
ganthore
Created:
2017-02-21 13:19:41 UTC
Size:
10.15 KB
patch
obsolete
>diff -ur NVIDIA-Linux-x86_64-378.13/kernel/common/inc/nv-linux.h NVIDIA-Linux-x86_64-378.13.patched/kernel/common/inc/nv-linux.h >--- NVIDIA-Linux-x86_64-378.13/kernel/common/inc/nv-linux.h 2017-02-08 04:58:34.000000000 +0100 >+++ NVIDIA-Linux-x86_64-378.13.patched/kernel/common/inc/nv-linux.h 2017-02-15 04:14:46.304110124 +0100 >@@ -294,7 +294,8 @@ > > extern int nv_pat_mode; > >-#if defined(CONFIG_HOTPLUG_CPU) >+//#if defined(CONFIG_HOTPLUG_CPU) >+#if 0 > #define NV_ENABLE_HOTPLUG_CPU > #include <linux/cpu.h> /* CPU hotplug support */ > #include <linux/notifier.h> /* struct notifier_block, etc */ >diff -ur NVIDIA-Linux-x86_64-378.13/kernel/nvidia/nv-p2p.c NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia/nv-p2p.c >--- NVIDIA-Linux-x86_64-378.13/kernel/nvidia/nv-p2p.c 2017-02-08 04:58:34.000000000 +0100 >+++ NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia/nv-p2p.c 2017-02-15 04:14:46.304110124 +0100 >@@ -146,7 +146,7 @@ > int nvidia_p2p_get_pages( > uint64_t p2p_token, > uint32_t va_space, >- uint64_t virtual_address, >+ uint64_t address, > uint64_t length, > struct nvidia_p2p_page_table **page_table, > void (*free_callback)(void * data), >@@ -211,7 +211,7 @@ > } > > status = rm_p2p_get_pages(sp, p2p_token, va_space, >- virtual_address, length, physical_addresses, wreqmb_h, >+ address, length, physical_addresses, wreqmb_h, > rreqmb_h, &entries, &gpu_uuid, *page_table, > free_callback, data); > if (status != NV_OK) >@@ -286,7 +286,7 @@ > > if (bGetPages) > { >- rm_p2p_put_pages(sp, p2p_token, va_space, virtual_address, >+ rm_p2p_put_pages(sp, p2p_token, va_space, address, > gpu_uuid, *page_table); > } > >@@ -329,7 +329,7 @@ > int nvidia_p2p_put_pages( > uint64_t p2p_token, > uint32_t va_space, >- uint64_t virtual_address, >+ uint64_t address, > struct nvidia_p2p_page_table *page_table > ) > { >@@ -343,7 +343,7 @@ > return rc; > } > >- status = rm_p2p_put_pages(sp, p2p_token, va_space, virtual_address, >+ status = rm_p2p_put_pages(sp, p2p_token, va_space, address, 
> page_table->gpu_uuid, page_table); > if (status == NV_OK) > nvidia_p2p_free_page_table(page_table); >diff -ur NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-fence.c NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia-drm/nvidia-drm-fence.c >--- NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-fence.c 2017-02-08 04:58:37.000000000 +0100 >+++ NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia-drm/nvidia-drm-fence.c 2017-02-15 04:14:46.304110124 +0100 >@@ -31,7 +31,7 @@ > > #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) > struct nv_fence { >- struct fence base; >+ struct dma_fence base; > spinlock_t lock; > > struct nvidia_drm_device *nv_dev; >@@ -51,7 +51,7 @@ > > static const char *nvidia_drm_gem_prime_fence_op_get_driver_name > ( >- struct fence *fence >+ struct dma_fence *fence > ) > { > return "NVIDIA"; >@@ -59,7 +59,7 @@ > > static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name > ( >- struct fence *fence >+ struct dma_fence *fence > ) > { > return "nvidia.prime"; >@@ -67,7 +67,7 @@ > > static bool nvidia_drm_gem_prime_fence_op_signaled > ( >- struct fence *fence >+ struct dma_fence *fence > ) > { > struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base); >@@ -99,7 +99,7 @@ > > static bool nvidia_drm_gem_prime_fence_op_enable_signaling > ( >- struct fence *fence >+ struct dma_fence *fence > ) > { > bool ret = true; >@@ -107,7 +107,7 @@ > struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem; > struct nvidia_drm_device *nv_dev = nv_fence->nv_dev; > >- if (fence_is_signaled(fence)) >+ if (dma_fence_is_signaled(fence)) > { > return false; > } >@@ -136,7 +136,7 @@ > } > > nv_gem->fenceContext.softFence = fence; >- fence_get(fence); >+ dma_fence_get(fence); > > unlock_struct_mutex: > mutex_unlock(&nv_dev->dev->struct_mutex); >@@ -146,7 +146,7 @@ > > static void nvidia_drm_gem_prime_fence_op_release > ( >- struct fence *fence >+ struct dma_fence *fence > ) > { > struct nv_fence *nv_fence = container_of(fence, struct 
nv_fence, base); >@@ -155,7 +155,7 @@ > > static signed long nvidia_drm_gem_prime_fence_op_wait > ( >- struct fence *fence, >+ struct dma_fence *fence, > bool intr, > signed long timeout > ) >@@ -170,12 +170,12 @@ > * that it should never get hit during normal operation, but not so long > * that the system becomes unresponsive. > */ >- return fence_default_wait(fence, intr, >+ return dma_fence_default_wait(fence, intr, > (timeout == MAX_SCHEDULE_TIMEOUT) ? > msecs_to_jiffies(96) : timeout); > } > >-static const struct fence_ops nvidia_drm_gem_prime_fence_ops = { >+static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = { > .get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name, > .get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name, > .signaled = nvidia_drm_gem_prime_fence_op_signaled, >@@ -285,7 +285,7 @@ > bool force > ) > { >- struct fence *fence = nv_gem->fenceContext.softFence; >+ struct dma_fence *fence = nv_gem->fenceContext.softFence; > > WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex)); > >@@ -301,10 +301,10 @@ > > if (force || nv_fence_ready_to_signal(nv_fence)) > { >- fence_signal(&nv_fence->base); >+ dma_fence_signal(&nv_fence->base); > > nv_gem->fenceContext.softFence = NULL; >- fence_put(&nv_fence->base); >+ dma_fence_put(&nv_fence->base); > > nvKms->disableChannelEvent(nv_dev->pDevice, > nv_gem->fenceContext.cb); >@@ -320,7 +320,7 @@ > > nv_fence = container_of(fence, struct nv_fence, base); > >- fence_signal(&nv_fence->base); >+ dma_fence_signal(&nv_fence->base); > } > } > >@@ -513,7 +513,7 @@ > * fence_context_alloc() cannot fail, so we do not need to check a return > * value. 
> */ >- nv_gem->fenceContext.context = fence_context_alloc(1); >+ nv_gem->fenceContext.context = dma_fence_context_alloc(1); > > ret = nvidia_drm_gem_prime_fence_import_semaphore( > nv_dev, nv_gem, p->index, >@@ -670,7 +670,7 @@ > nv_fence->nv_gem = nv_gem; > > spin_lock_init(&nv_fence->lock); >- fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops, >+ dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops, > &nv_fence->lock, nv_gem->fenceContext.context, > p->sem_thresh); > >@@ -680,7 +680,7 @@ > > reservation_object_add_excl_fence(&nv_gem->fenceContext.resv, > &nv_fence->base); >- fence_put(&nv_fence->base); /* Reservation object has reference */ >+ dma_fence_put(&nv_fence->base); /* Reservation object has reference */ > > ret = 0; > >diff -ur NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-gem.h NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia-drm/nvidia-drm-gem.h >--- NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-gem.h 2017-02-08 04:58:37.000000000 +0100 >+++ NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia-drm/nvidia-drm-gem.h 2017-02-15 04:14:46.304110124 +0100 >@@ -101,7 +101,7 @@ > /* Software signaling structures */ > struct NvKmsKapiChannelEvent *cb; > struct nvidia_drm_gem_prime_soft_fence_event_args *cbArgs; >- struct fence *softFence; /* Fence for software signaling */ >+ struct dma_fence *softFence; /* Fence for software signaling */ > } fenceContext; > #endif > }; >diff -ur NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-modeset.c NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia-drm/nvidia-drm-modeset.c >--- NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-modeset.c 2017-02-08 04:58:37.000000000 +0100 >+++ NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia-drm/nvidia-drm-modeset.c 2017-02-15 04:14:46.305110124 +0100 >@@ -78,8 +78,7 @@ > > void nvidia_drm_atomic_state_free(struct drm_atomic_state *state) > { >- struct nvidia_drm_atomic_state *nv_state = >- to_nv_atomic_state(state); >+ struct 
nvidia_drm_atomic_state *nv_state = to_nv_atomic_state(state); > drm_atomic_state_default_release(state); > nvidia_drm_free(nv_state); > } >diff -ur NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-priv.h NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia-drm/nvidia-drm-priv.h >--- NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-priv.h 2017-02-08 04:58:37.000000000 +0100 >+++ NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia-drm/nvidia-drm-priv.h 2017-02-15 04:14:46.305110124 +0100 >@@ -34,7 +34,7 @@ > #endif > > #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) >-#include <linux/fence.h> >+#include <linux/dma-fence.h> > #include <linux/reservation.h> > #endif > >diff -ur NVIDIA-Linux-x86_64-378.13/kernel/nvidia-uvm/uvm8_test.c NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia-uvm/uvm8_test.c >--- NVIDIA-Linux-x86_64-378.13/kernel/nvidia-uvm/uvm8_test.c 2017-02-08 04:58:36.000000000 +0100 >+++ NVIDIA-Linux-x86_64-378.13.patched/kernel/nvidia-uvm/uvm8_test.c 2017-02-15 04:14:46.305110124 +0100 >@@ -103,7 +103,7 @@ > return NV_ERR_INVALID_STATE; > } > >-static NV_STATUS uvm8_test_get_kernel_virtual_address( >+static NV_STATUS uvm8_test_get_kernel_address( > UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS_PARAMS *params, > struct file *filp) > { >@@ -173,7 +173,7 @@ > UVM_ROUTE_CMD_STACK(UVM_TEST_RANGE_GROUP_RANGE_COUNT, uvm8_test_range_group_range_count); > UVM_ROUTE_CMD_STACK(UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE, uvm8_test_get_prefetch_faults_reenable_lapse); > UVM_ROUTE_CMD_STACK(UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE, uvm8_test_set_prefetch_faults_reenable_lapse); >- UVM_ROUTE_CMD_STACK(UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS, uvm8_test_get_kernel_virtual_address); >+ UVM_ROUTE_CMD_STACK(UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS, uvm8_test_get_kernel_address); > UVM_ROUTE_CMD_STACK(UVM_TEST_PMA_ALLOC_FREE, uvm8_test_pma_alloc_free); > UVM_ROUTE_CMD_STACK(UVM_TEST_PMM_ALLOC_FREE_ROOT, uvm8_test_pmm_alloc_free_root); > 
UVM_ROUTE_CMD_STACK(UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR, uvm8_test_pmm_inject_pma_evict_error);
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on bug 610110:
464456
|
464508
| 464510