Lines 31-37
 
 #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
 struct nv_fence {
-    struct fence base;
+    struct dma_fence base;
     spinlock_t lock;
 
     struct nvidia_drm_device *nv_dev;
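The change throughout this file tracks the Linux 4.10 rename of the kernel fence API: struct fence became struct dma_fence, the fence_*() helpers became dma_fence_*(), and the header moved from <linux/fence.h> to <linux/dma-fence.h>. Below is a minimal compatibility sketch, assuming a hypothetical nv-dma-fence-compat.h and a plain LINUX_VERSION_CODE check rather than the driver's conftest-generated defines; it shows one way to keep a single code path building on kernels before and after the rename, and is not part of this patch.

/* nv-dma-fence-compat.h: hypothetical shim, not part of this patch.
 * On kernels >= 4.10 the fence API was renamed to dma_fence; map the new
 * names back to the old ones on older kernels so one code path builds on both.
 */
#ifndef NV_DMA_FENCE_COMPAT_H
#define NV_DMA_FENCE_COMPAT_H

#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
#include <linux/dma-fence.h>
#else
#include <linux/fence.h>
#define dma_fence               fence
#define dma_fence_ops           fence_ops
#define dma_fence_init          fence_init
#define dma_fence_get           fence_get
#define dma_fence_put           fence_put
#define dma_fence_signal        fence_signal
#define dma_fence_is_signaled   fence_is_signaled
#define dma_fence_default_wait  fence_default_wait
#define dma_fence_context_alloc fence_context_alloc
#endif

#endif /* NV_DMA_FENCE_COMPAT_H */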
Lines 51-57
 
 static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     return "NVIDIA";
Lines 59-65
 
 static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     return "nvidia.prime";
Lines 67-73
 
 static bool nvidia_drm_gem_prime_fence_op_signaled
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
Lines 99-105
 
 static bool nvidia_drm_gem_prime_fence_op_enable_signaling
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     bool ret = true;
Lines 107-113
     struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem;
     struct nvidia_drm_device *nv_dev = nv_fence->nv_dev;
 
-    if (fence_is_signaled(fence))
+    if (dma_fence_is_signaled(fence))
     {
         return false;
     }
Lines 136-142
     }
 
     nv_gem->fenceContext.softFence = fence;
-    fence_get(fence);
+    dma_fence_get(fence);
 
 unlock_struct_mutex:
     mutex_unlock(&nv_dev->dev->struct_mutex);
Lines 146-152
 
 static void nvidia_drm_gem_prime_fence_op_release
 (
-    struct fence *fence
+    struct dma_fence *fence
 )
 {
     struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
Lines 155-161
 
 static signed long nvidia_drm_gem_prime_fence_op_wait
 (
-    struct fence *fence,
+    struct dma_fence *fence,
     bool intr,
     signed long timeout
 )
Lines 170-181
      * that it should never get hit during normal operation, but not so long
      * that the system becomes unresponsive.
      */
-    return fence_default_wait(fence, intr,
+    return dma_fence_default_wait(fence, intr,
                                (timeout == MAX_SCHEDULE_TIMEOUT) ?
                                    msecs_to_jiffies(96) : timeout);
 }
 
-static const struct fence_ops nvidia_drm_gem_prime_fence_ops = {
+static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = {
     .get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name,
     .get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name,
     .signaled = nvidia_drm_gem_prime_fence_op_signaled,
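The wait callback above caps an "infinite" wait at 96 ms so a lost software-signaling event cannot hang the system, and the ops table itself is a straight rename to dma_fence_ops. For reference, here is a minimal, self-contained sketch of the same dma_fence lifecycle on a 4.10-era kernel (where .wait and .enable_signaling are still mandatory). All ex_* names are hypothetical and not part of this patch.

#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static const char *ex_get_driver_name(struct dma_fence *f)   { return "example"; }
static const char *ex_get_timeline_name(struct dma_fence *f) { return "example.timeline"; }
static bool ex_enable_signaling(struct dma_fence *f)         { return true; }

static const struct dma_fence_ops ex_fence_ops = {
    .get_driver_name   = ex_get_driver_name,
    .get_timeline_name = ex_get_timeline_name,
    .enable_signaling  = ex_enable_signaling,
    .wait              = dma_fence_default_wait, /* same default the patched wait op wraps */
};

static DEFINE_SPINLOCK(ex_fence_lock);

static void ex_fence_demo(void)
{
    struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);
    u64 context = dma_fence_context_alloc(1);

    if (!f)
        return;

    /* Refcount starts at 1; context and seqno identify the fence on its timeline. */
    dma_fence_init(f, &ex_fence_ops, &ex_fence_lock, context, 1);

    /* Nothing signals this fence yet, so the bounded wait simply times out,
     * mirroring the 96 ms cap used by the wait op above. */
    dma_fence_wait_timeout(f, false, msecs_to_jiffies(96));

    dma_fence_signal(f);  /* a producer would normally do this on completion */
    dma_fence_put(f);     /* drop the initial reference; the fence is freed  */
}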
Lines 285-291
     bool force
 )
 {
-    struct fence *fence = nv_gem->fenceContext.softFence;
+    struct dma_fence *fence = nv_gem->fenceContext.softFence;
 
     WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex));
 
Lines 301-310
 
     if (force || nv_fence_ready_to_signal(nv_fence))
     {
-        fence_signal(&nv_fence->base);
+        dma_fence_signal(&nv_fence->base);
 
         nv_gem->fenceContext.softFence = NULL;
-        fence_put(&nv_fence->base);
+        dma_fence_put(&nv_fence->base);
 
         nvKms->disableChannelEvent(nv_dev->pDevice,
                                    nv_gem->fenceContext.cb);
Lines 320-326
 
         nv_fence = container_of(fence, struct nv_fence, base);
 
-        fence_signal(&nv_fence->base);
+        dma_fence_signal(&nv_fence->base);
     }
 }
 
Lines 510-519
     }
 
     /*
-     * fence_context_alloc() cannot fail, so we do not need to check a return
-     * value.
+     * dma_fence_context_alloc() cannot fail, so we do not need to check a
+     * return value.
      */
-    nv_gem->fenceContext.context = fence_context_alloc(1);
+    nv_gem->fenceContext.context = dma_fence_context_alloc(1);
 
     ret = nvidia_drm_gem_prime_fence_import_semaphore(
               nv_dev, nv_gem, p->index,
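dma_fence_context_alloc() only reserves a range of IDs from a global 64-bit counter, which is why, as the comment says, it cannot fail. A context identifies a timeline, and fences initialized with the same context are ordered by their seqno; that is why the hunk below passes the semaphore threshold as the seqno. An illustrative fragment (fence_a and fence_b are hypothetical, same assumptions as above):

/* One context == one timeline; increasing seqnos order fences on it. */
u64 context = dma_fence_context_alloc(1);   /* cannot fail */

dma_fence_init(&fence_a->base, &nvidia_drm_gem_prime_fence_ops,
               &fence_a->lock, context, /* seqno = */ 1);
dma_fence_init(&fence_b->base, &nvidia_drm_gem_prime_fence_ops,
               &fence_b->lock, context, /* seqno = */ 2);

/* dma_fence_is_later(&fence_b->base, &fence_a->base) is now true. */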
Lines 670-678
     nv_fence->nv_gem = nv_gem;
 
     spin_lock_init(&nv_fence->lock);
-    fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+    dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
                &nv_fence->lock, nv_gem->fenceContext.context,
                p->sem_thresh);
 
     /* We could be replacing an existing exclusive fence; force signal it to
      * unblock anyone waiting on it and clean up software signaling. */
Link Here
|
680 |
|
680 |
|
681 |
reservation_object_add_excl_fence(&nv_gem->fenceContext.resv, |
681 |
reservation_object_add_excl_fence(&nv_gem->fenceContext.resv, |
682 |
&nv_fence->base); |
682 |
&nv_fence->base); |
683 |
fence_put(&nv_fence->base); /* Reservation object has reference */ |
683 |
dma_fence_put(&nv_fence->base); /* Reservation object has reference */ |
684 |
|
684 |
|
685 |
ret = 0; |
685 |
ret = 0; |
686 |
|
686 |
|
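The dma_fence_put() above is safe because reservation_object_add_excl_fence() takes its own reference on the fence it stores (as the inline comment notes), so the reference created by dma_fence_init() can be dropped right away. A fragment tracing the reference flow, using the patch's own calls for illustration:

/* Reference flow for the exclusive fence: */
dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
               &nv_fence->lock, nv_gem->fenceContext.context,
               p->sem_thresh);                        /* refcount 1, held by this code     */

reservation_object_add_excl_fence(&nv_gem->fenceContext.resv,
                                  &nv_fence->base);   /* reservation takes its own reference */

dma_fence_put(&nv_fence->base);                       /* drop ours; the resv keeps it alive  */

/* The software-signaling stash in enable_signaling is balanced the same way:
 * dma_fence_get() when storing softFence, dma_fence_put() after signaling it. */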