Patch from Gentoo bug 610110: source changes for the NVIDIA-Linux-x86_64-378.13 kernel modules (nv-linux.h, nv-p2p.c, nvidia-drm, nvidia-uvm).
(-)NVIDIA-Linux-x86_64-378.13/kernel/common/inc/nv-linux.h (-1 / +2 lines)
Lines 294-300
 
 extern int nv_pat_mode;
 
-#if defined(CONFIG_HOTPLUG_CPU)
+//#if defined(CONFIG_HOTPLUG_CPU)
+#if 0
 #define NV_ENABLE_HOTPLUG_CPU
 #include <linux/cpu.h>              /* CPU hotplug support              */
 #include <linux/notifier.h>         /* struct notifier_block, etc       */
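The hunk above replaces the CONFIG_HOTPLUG_CPU guard with "#if 0" (leaving the old guard commented out), so NV_ENABLE_HOTPLUG_CPU is never defined and the driver's notifier-based CPU-hotplug support is compiled out unconditionally, presumably because those notifier code paths no longer build against newer kernels. A minimal sketch of the preprocessor effect; nv_cpu_callback below is a hypothetical example, not code from the driver:

/* With the patch applied, the guard is dead regardless of kernel config: */
#if 0                                   /* was: #if defined(CONFIG_HOTPLUG_CPU) */
#define NV_ENABLE_HOTPLUG_CPU
#endif

/* ...so any hotplug handling keyed off the macro disappears at compile time: */
#if defined(NV_ENABLE_HOTPLUG_CPU)
static int nv_cpu_callback(struct notifier_block *nfb,
                           unsigned long action, void *hcpu)
{
    return 0;                           /* never built after this patch */
}
#endif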
(-)NVIDIA-Linux-x86_64-378.13/kernel/nvidia/nv-p2p.c (-5 / +5 lines)
Lines 146-152
 int nvidia_p2p_get_pages(
     uint64_t p2p_token,
     uint32_t va_space,
-    uint64_t virtual_address,
+    uint64_t address,
     uint64_t length,
     struct nvidia_p2p_page_table **page_table,
     void (*free_callback)(void * data),
Lines 211-217
     }
 
     status = rm_p2p_get_pages(sp, p2p_token, va_space,
-            virtual_address, length, physical_addresses, wreqmb_h,
+            address, length, physical_addresses, wreqmb_h,
             rreqmb_h, &entries, &gpu_uuid, *page_table,
             free_callback, data);
     if (status != NV_OK)
Lines 286-292
 
     if (bGetPages)
     {
-        rm_p2p_put_pages(sp, p2p_token, va_space, virtual_address,
+        rm_p2p_put_pages(sp, p2p_token, va_space, address,
                 gpu_uuid, *page_table);
     }
 
Lines 329-335
 int nvidia_p2p_put_pages(
     uint64_t p2p_token,
     uint32_t va_space,
-    uint64_t virtual_address,
+    uint64_t address,
     struct nvidia_p2p_page_table *page_table
 )
 {
Lines 343-349
         return rc;
     }
 
-    status = rm_p2p_put_pages(sp, p2p_token, va_space, virtual_address,
+    status = rm_p2p_put_pages(sp, p2p_token, va_space, address,
             page_table->gpu_uuid, page_table);
     if (status == NV_OK)
         nvidia_p2p_free_page_table(page_table);
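In nvidia_p2p_get_pages() and nvidia_p2p_put_pages() the parameter virtual_address is renamed to address and the rm_p2p_get_pages()/rm_p2p_put_pages() call sites are updated to match, presumably to keep these definitions in sync with the prototypes declared in the updated headers; the argument types and order are unchanged. A sketch of a hypothetical external caller (example_pin_gpu_pages is not part of the driver) showing that existing users are unaffected by the rename:

/* Hypothetical GPUDirect-style caller: only the parameter name changed,
 * so the call is written exactly as before the patch. */
static int example_pin_gpu_pages(uint64_t gpu_va, uint64_t len,
                                 struct nvidia_p2p_page_table **pt,
                                 void (*free_cb)(void *data), void *data)
{
    /* p2p_token and va_space are typically passed as 0 by such callers. */
    return nvidia_p2p_get_pages(0, 0, gpu_va, len, pt, free_cb, data);
}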
(-)NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-fence.c (-18 / +18 lines)
Lines 31-37
 
 #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
 struct nv_fence {
-    struct fence base;
+    struct dma_fence base;
     spinlock_t lock;
 
     struct nvidia_drm_device *nv_dev;
Lines 51-57
 
 static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     return "NVIDIA";
Lines 59-65
 
 static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     return "nvidia.prime";
Lines 67-73
 
 static bool nvidia_drm_gem_prime_fence_op_signaled
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
Lines 99-105
 
 static bool nvidia_drm_gem_prime_fence_op_enable_signaling
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     bool ret = true;
Lines 107-113
     struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem;
     struct nvidia_drm_device *nv_dev = nv_fence->nv_dev;
 
-    if (fence_is_signaled(fence))
+    if (dma_fence_is_signaled(fence))
     {
         return false;
     }
Lines 136-142
     }
 
     nv_gem->fenceContext.softFence = fence;
-    fence_get(fence);
+    dma_fence_get(fence);
 
 unlock_struct_mutex:
     mutex_unlock(&nv_dev->dev->struct_mutex);
Lines 146-152
 
 static void nvidia_drm_gem_prime_fence_op_release
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
Lines 155-161
 
 static signed long nvidia_drm_gem_prime_fence_op_wait
 (
-    struct fence *fence,
+    struct dma_fence *fence,
     bool intr,
     signed long timeout
 )
Lines 170-181
      * that it should never get hit during normal operation, but not so long
      * that the system becomes unresponsive.
      */
-    return fence_default_wait(fence, intr,
+    return dma_fence_default_wait(fence, intr,
                               (timeout == MAX_SCHEDULE_TIMEOUT) ?
                                   msecs_to_jiffies(96) : timeout);
 }
 
-static const struct fence_ops nvidia_drm_gem_prime_fence_ops = {
+static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = {
     .get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name,
     .get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name,
     .signaled = nvidia_drm_gem_prime_fence_op_signaled,
Lines 285-291
     bool force
 )
 {
-    struct fence *fence = nv_gem->fenceContext.softFence;
+    struct dma_fence *fence = nv_gem->fenceContext.softFence;
 
     WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex));
 
Lines 301-310
 
         if (force || nv_fence_ready_to_signal(nv_fence))
         {
-            fence_signal(&nv_fence->base);
+            dma_fence_signal(&nv_fence->base);
 
             nv_gem->fenceContext.softFence = NULL;
-            fence_put(&nv_fence->base);
+            dma_fence_put(&nv_fence->base);
 
             nvKms->disableChannelEvent(nv_dev->pDevice,
                                        nv_gem->fenceContext.cb);
Lines 320-326
 
         nv_fence = container_of(fence, struct nv_fence, base);
 
-        fence_signal(&nv_fence->base);
+        dma_fence_signal(&nv_fence->base);
     }
 }
 
Lines 513-519
      * fence_context_alloc() cannot fail, so we do not need to check a return
      * value.
      */
-    nv_gem->fenceContext.context = fence_context_alloc(1);
+    nv_gem->fenceContext.context = dma_fence_context_alloc(1);
 
     ret = nvidia_drm_gem_prime_fence_import_semaphore(
               nv_dev, nv_gem, p->index,
Lines 670-676
     nv_fence->nv_gem = nv_gem;
 
     spin_lock_init(&nv_fence->lock);
-    fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+    dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
                &nv_fence->lock, nv_gem->fenceContext.context,
                p->sem_thresh);
 
Lines 680-686
 
     reservation_object_add_excl_fence(&nv_gem->fenceContext.resv,
                                       &nv_fence->base);
-    fence_put(&nv_fence->base); /* Reservation object has reference */
+    dma_fence_put(&nv_fence->base); /* Reservation object has reference */
 
     ret = 0;
 
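This file is a mechanical rename: every struct fence, struct fence_ops and fence_*() use becomes its dma_fence counterpart, matching the kernel's rename of the fence API to dma_fence that landed in Linux 4.10. An alternative approach, shown only as a hedged sketch here (none of the defines below are part of this patch), would be a version-guarded compatibility shim so the renamed code still builds on pre-4.10 kernels:

/* Hypothetical compat shim (not in this patch): map the dma_fence names
 * back to the pre-4.10 "fence" API on older kernels. */
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
#include <linux/dma-fence.h>
#else
#include <linux/fence.h>
#define dma_fence               fence
#define dma_fence_ops           fence_ops
#define dma_fence_init          fence_init
#define dma_fence_get           fence_get
#define dma_fence_put           fence_put
#define dma_fence_signal        fence_signal
#define dma_fence_is_signaled   fence_is_signaled
#define dma_fence_default_wait  fence_default_wait
#define dma_fence_context_alloc fence_context_alloc
#endif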
(-)NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-gem.h (-1 / +1 lines)
Lines 101-107
         /* Software signaling structures */
         struct NvKmsKapiChannelEvent *cb;
         struct nvidia_drm_gem_prime_soft_fence_event_args *cbArgs;
-        struct fence *softFence; /* Fence for software signaling */
+        struct dma_fence *softFence; /* Fence for software signaling */
     } fenceContext;
 #endif
 };
(-)NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-modeset.c (-2 / +1 lines)
Lines 78-85
 
 void nvidia_drm_atomic_state_free(struct drm_atomic_state *state)
 {
-    struct nvidia_drm_atomic_state *nv_state =
-                    to_nv_atomic_state(state);
+    struct nvidia_drm_atomic_state *nv_state = to_nv_atomic_state(state);
     drm_atomic_state_default_release(state);
     nvidia_drm_free(nv_state);
 }
(-)NVIDIA-Linux-x86_64-378.13/kernel/nvidia-drm/nvidia-drm-priv.h (-1 / +1 lines)
Lines 34-40
 #endif
 
 #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/reservation.h>
 #endif
 
(-)NVIDIA-Linux-x86_64-378.13/kernel/nvidia-uvm/uvm8_test.c (-2 / +2 lines)
Lines 103-109
     return NV_ERR_INVALID_STATE;
 }
 
-static NV_STATUS uvm8_test_get_kernel_virtual_address(
+static NV_STATUS uvm8_test_get_kernel_address(
         UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS_PARAMS *params,
         struct file *filp)
 {
Lines 173-179
         UVM_ROUTE_CMD_STACK(UVM_TEST_RANGE_GROUP_RANGE_COUNT,       uvm8_test_range_group_range_count);
         UVM_ROUTE_CMD_STACK(UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE, uvm8_test_get_prefetch_faults_reenable_lapse);
         UVM_ROUTE_CMD_STACK(UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE, uvm8_test_set_prefetch_faults_reenable_lapse);
-        UVM_ROUTE_CMD_STACK(UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS,    uvm8_test_get_kernel_virtual_address);
+        UVM_ROUTE_CMD_STACK(UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS,    uvm8_test_get_kernel_address);
         UVM_ROUTE_CMD_STACK(UVM_TEST_PMA_ALLOC_FREE,                uvm8_test_pma_alloc_free);
         UVM_ROUTE_CMD_STACK(UVM_TEST_PMM_ALLOC_FREE_ROOT,           uvm8_test_pmm_alloc_free_root);
         UVM_ROUTE_CMD_STACK(UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR,    uvm8_test_pmm_inject_pma_evict_error);
