@@ -31,7 +31,11 @@
 
 #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
 struct nv_fence {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence base;
+#else
+    struct dma_fence base;
+#endif
     spinlock_t lock;
 
     struct nvidia_drm_device *nv_dev;
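The pattern repeated throughout this patch: Linux 4.10 renamed struct fence to struct dma_fence, moved the API from <linux/fence.h> to <linux/dma-fence.h>, and prefixed every helper with dma_, so each affected declaration and call is wrapped in a LINUX_VERSION_CODE check that picks the name matching the kernel headers being built against. A more maintainable alternative would be a small compat header that hides the check behind neutral names. The sketch below is only an illustration of that idea; the nv_dma_fence_* names are hypothetical and not part of this patch.

/*
 * Hypothetical compat shim -- an illustration, not part of this patch.
 * Centralizes the 4.10 fence -> dma_fence rename so call sites can use
 * one set of names on every supported kernel.
 */
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
#include <linux/fence.h>
typedef struct fence nv_dma_fence_t;
typedef struct fence_ops nv_dma_fence_ops_t;
#define nv_dma_fence_init           fence_init
#define nv_dma_fence_get            fence_get
#define nv_dma_fence_put            fence_put
#define nv_dma_fence_signal         fence_signal
#define nv_dma_fence_is_signaled    fence_is_signaled
#define nv_dma_fence_default_wait   fence_default_wait
#define nv_dma_fence_context_alloc  fence_context_alloc
#else
#include <linux/dma-fence.h>
typedef struct dma_fence nv_dma_fence_t;
typedef struct dma_fence_ops nv_dma_fence_ops_t;
#define nv_dma_fence_init           dma_fence_init
#define nv_dma_fence_get            dma_fence_get
#define nv_dma_fence_put            dma_fence_put
#define nv_dma_fence_signal         dma_fence_signal
#define nv_dma_fence_is_signaled    dma_fence_is_signaled
#define nv_dma_fence_default_wait   dma_fence_default_wait
#define nv_dma_fence_context_alloc  dma_fence_context_alloc
#endif

With such a shim in place, struct nv_fence could embed nv_dma_fence_t base; unconditionally and the per-call-site #if blocks in the hunks below would disappear.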
@@ -51,7 +55,11 @@ nv_fence_ready_to_signal(struct nv_fence *nv_fence)
 
 static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence
+#else
+    struct dma_fence *fence
+#endif
 )
 {
     return "NVIDIA";
@@ -59,7 +67,11 @@ static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
 
 static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence
+#else
+    struct dma_fence *fence
+#endif
 )
 {
     return "nvidia.prime";
@@ -67,7 +79,11 @@ static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
 
 static bool nvidia_drm_gem_prime_fence_op_signaled
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence
+#else
+    struct dma_fence *fence
+#endif
 )
 {
     struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -99,7 +115,11 @@ unlock_struct_mutex:
 
 static bool nvidia_drm_gem_prime_fence_op_enable_signaling
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence
+#else
+    struct dma_fence *fence
+#endif
 )
 {
     bool ret = true;
@@ -107,7 +127,11 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
     struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem;
     struct nvidia_drm_device *nv_dev = nv_fence->nv_dev;
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     if (fence_is_signaled(fence))
+#else
+    if (dma_fence_is_signaled(fence))
+#endif
     {
         return false;
     }
@@ -136,7 +160,11 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
     }
 
     nv_gem->fenceContext.softFence = fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     fence_get(fence);
+#else
+    dma_fence_get(fence);
+#endif
 
 unlock_struct_mutex:
     mutex_unlock(&nv_dev->dev->struct_mutex);
@@ -146,7 +174,11 @@ unlock_struct_mutex:
 
 static void nvidia_drm_gem_prime_fence_op_release
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence
+#else
+    struct dma_fence *fence
+#endif
 )
 {
     struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -155,7 +187,11 @@ static void nvidia_drm_gem_prime_fence_op_release
 
 static signed long nvidia_drm_gem_prime_fence_op_wait
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence,
+#else
+    struct dma_fence *fence,
+#endif
     bool intr,
     signed long timeout
 )
@@ -170,12 +206,20 @@ static signed long nvidia_drm_gem_prime_fence_op_wait
      * that it should never get hit during normal operation, but not so long
      * that the system becomes unresponsive.
      */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     return fence_default_wait(fence, intr,
+#else
+    return dma_fence_default_wait(fence, intr,
+#endif
                               (timeout == MAX_SCHEDULE_TIMEOUT) ?
                               msecs_to_jiffies(96) : timeout);
 }
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 static const struct fence_ops nvidia_drm_gem_prime_fence_ops = {
+#else
+static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = {
+#endif
     .get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name,
     .get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name,
     .signaled = nvidia_drm_gem_prime_fence_op_signaled,
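Note that the 4.10 rename changed only type and function names, not the callback signatures or their semantics, so a single ops table (declared with whichever struct tag matches) serves both kernel ranges. The wait callback also encodes a deliberate policy, as the comment above it explains: when a caller requests an unbounded wait (timeout == MAX_SCHEDULE_TIMEOUT), the driver substitutes roughly 96 ms, so a semaphore that never releases degrades into a short stall rather than an indefinite hang. fence_default_wait()/dma_fence_default_wait() return the remaining timeout in jiffies on success and zero when the wait times out.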
@@ -285,7 +329,11 @@ static void nvidia_drm_gem_prime_fence_signal
     bool force
 )
 {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence = nv_gem->fenceContext.softFence;
+#else
+    struct dma_fence *fence = nv_gem->fenceContext.softFence;
+#endif
 
     WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex));
 
@@ -301,10 +349,18 @@ static void nvidia_drm_gem_prime_fence_signal
 
     if (force || nv_fence_ready_to_signal(nv_fence))
     {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         fence_signal(&nv_fence->base);
+#else
+        dma_fence_signal(&nv_fence->base);
+#endif
 
         nv_gem->fenceContext.softFence = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         fence_put(&nv_fence->base);
+#else
+        dma_fence_put(&nv_fence->base);
+#endif
 
         nvKms->disableChannelEvent(nv_dev->pDevice,
                                    nv_gem->fenceContext.cb);
@@ -320,7 +376,11 @@ static void nvidia_drm_gem_prime_fence_signal
 
         nv_fence = container_of(fence, struct nv_fence, base);
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         fence_signal(&nv_fence->base);
+#else
+        dma_fence_signal(&nv_fence->base);
+#endif
     }
 }
 
@@ -513,7 +573,11 @@ int nvidia_drm_gem_prime_fence_init
      * fence_context_alloc() cannot fail, so we do not need to check a return
      * value.
      */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     nv_gem->fenceContext.context = fence_context_alloc(1);
+#else
+    nv_gem->fenceContext.context = dma_fence_context_alloc(1);
+#endif
 
     ret = nvidia_drm_gem_prime_fence_import_semaphore(
               nv_dev, nv_gem, p->index,
@@ -670,7 +734,11 @@ int nvidia_drm_gem_prime_fence_attach
     nv_fence->nv_gem = nv_gem;
 
     spin_lock_init(&nv_fence->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+#else
+    dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+#endif
                &nv_fence->lock, nv_gem->fenceContext.context,
                p->sem_thresh);
 
@@ -680,7 +748,11 @@ int nvidia_drm_gem_prime_fence_attach
 
     reservation_object_add_excl_fence(&nv_gem->fenceContext.resv,
                                       &nv_fence->base);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     fence_put(&nv_fence->base); /* Reservation object has reference */
+#else
+    dma_fence_put(&nv_fence->base);
+#endif
 
     ret = 0;
 
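This last hunk preserves a reference-counting invariant, not just the rename: fence_init()/dma_fence_init() create the fence with a reference count of one, reservation_object_add_excl_fence() takes its own reference when it installs the fence as the exclusive fence (hence the "Reservation object has reference" comment), and the trailing fence_put()/dma_fence_put() drops the creator's reference so the reservation object becomes the sole owner. Omitting that put on either side of the version check would leak the fence.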