Line 0
Link Here
|
|
|
1 |
/* _NVRM_COPYRIGHT_BEGIN_ |
2 |
* |
3 |
* Copyright 2001 by NVIDIA Corporation. All rights reserved. All |
4 |
* information contained herein is proprietary and confidential to NVIDIA |
5 |
* Corporation. Any use, reproduction, or disclosure without the written |
6 |
* permission of NVIDIA Corporation is prohibited. |
7 |
* |
8 |
* _NVRM_COPYRIGHT_END_ |
9 |
*/ |
10 |
|
11 |
|
12 |
#ifndef _NV_LINUX_H_ |
13 |
#define _NV_LINUX_H_ |
14 |
|
15 |
#include "nv.h" |
16 |
#include "conftest.h" |
17 |
|
18 |
#if defined(NV_GENERATED_AUTOCONF_H_PRESENT) |
19 |
#include <generated/autoconf.h> |
20 |
#else |
21 |
#include <linux/autoconf.h> |
22 |
#endif |
23 |
|
24 |
#include <linux/version.h> |
25 |
#include <linux/utsname.h> |
26 |
|
27 |
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 7) |
28 |
# error This driver does not support 2.4 kernels older than 2.4.7! |
29 |
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) |
30 |
# define KERNEL_2_4 |
31 |
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) |
32 |
# error This driver does not support 2.5 kernels! |
33 |
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0) |
34 |
# define KERNEL_2_6 |
35 |
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) |
36 |
# define KERNEL_3 |
37 |
#else |
38 |
# error This driver does not support development kernels! |
39 |
#endif |
40 |
|
41 |
#if defined(KERNEL_2_4) |
42 |
#define NV_KMEM_CACHE_CREATE_PRESENT |
43 |
#define NV_KMEM_CACHE_CREATE_ARGUMENT_COUNT 6 |
44 |
#define NV_IRQ_HANDLER_T_TAKES_PTREGS |
45 |
#endif |
46 |
|
47 |
#if defined (CONFIG_SMP) && !defined (__SMP__) |
48 |
#define __SMP__ |
49 |
#endif |
50 |
|
51 |
#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) |
52 |
# define MODVERSIONS |
53 |
#endif |
54 |
|
55 |
#if defined(MODVERSIONS) && defined(KERNEL_2_4) |
56 |
#include <linux/modversions.h> |
57 |
#endif |
58 |
|
59 |
#if defined(KERNEL_2_4) && !defined(EXPORT_SYMTAB) |
60 |
#define EXPORT_SYMTAB |
61 |
#endif |
62 |
|
63 |
#include <linux/kernel.h> |
64 |
#include <linux/module.h> |
65 |
#include <linux/kmod.h> |
66 |
|
67 |
#include <linux/mm.h> |
68 |
|
69 |
#if !defined(VM_RESERVED) |
70 |
#define VM_RESERVED 0x00000000 |
71 |
#endif |
72 |
#if !defined(VM_DONTEXPAND) |
73 |
#define VM_DONTEXPAND 0x00000000 |
74 |
#endif |
75 |
#if !defined(VM_DONTDUMP) |
76 |
#define VM_DONTDUMP 0x00000000 |
77 |
#endif |
78 |
|
79 |
#include <linux/init.h> /* module_init, module_exit */ |
80 |
#include <linux/types.h>              /* pid_t, size_t, __u32, etc */ |
81 |
#include <linux/errno.h> /* error codes */ |
82 |
#include <linux/list.h> /* circular linked list */ |
83 |
#include <linux/stddef.h> /* NULL, offsetof */ |
84 |
#include <linux/wait.h> /* wait queues */ |
85 |
#include <linux/string.h> /* strchr(), strpbrk() */ |
86 |
#include <linux/ctype.h> /* isspace(), etc */ |
87 |
|
88 |
#include <linux/slab.h> /* kmalloc, kfree, etc */ |
89 |
#include <linux/vmalloc.h> /* vmalloc, vfree, etc */ |
90 |
|
91 |
#include <linux/poll.h> /* poll_wait */ |
92 |
#include <linux/delay.h> /* mdelay, udelay */ |
93 |
|
94 |
#if !defined(KERNEL_2_4) |
95 |
#include <linux/sched.h> /* suser(), capable() replacement */ |
96 |
#include <linux/moduleparam.h> /* module_param() */ |
97 |
#include <asm/tlbflush.h> /* flush_tlb(), flush_tlb_all() */ |
98 |
#include <asm/kmap_types.h> /* page table entry lookup */ |
99 |
#endif |
100 |
|
101 |
#include <linux/pci.h> /* pci_find_class, etc */ |
102 |
#include <linux/interrupt.h> /* tasklets, interrupt helpers */ |
103 |
#include <linux/timer.h> |
104 |
|
105 |
#include <asm/div64.h> /* do_div() */ |
106 |
#if defined(NV_ASM_SYSTEM_H_PRESENT) |
107 |
#include <asm/system.h> /* cli, sli, save_flags */ |
108 |
#endif |
109 |
#include <asm/io.h> /* ioremap, virt_to_phys */ |
110 |
#include <asm/uaccess.h> /* access_ok */ |
111 |
#include <asm/page.h> /* PAGE_OFFSET */ |
112 |
#include <asm/pgtable.h> /* pte bit definitions */ |
113 |
|
114 |
#if defined(NVCPU_X86_64) && !defined(KERNEL_2_4) && !defined(HAVE_COMPAT_IOCTL) |
115 |
#include <linux/syscalls.h> /* sys_ioctl() */ |
116 |
#include <linux/ioctl32.h> /* register_ioctl32_conversion() */ |
117 |
#endif |
118 |
|
119 |
#if defined(NVCPU_X86_64) && defined(KERNEL_2_4) |
120 |
#include <asm/ioctl32.h> /* sys_ioctl() (ioctl32) */ |
121 |
#endif |
122 |
|
123 |
#if !defined(NV_FILE_OPERATIONS_HAS_IOCTL) && \ |
124 |
!defined(NV_FILE_OPERATIONS_HAS_UNLOCKED_IOCTL) |
125 |
#error "struct file_operations compile test likely failed!" |
126 |
#endif |
127 |
|
128 |
#if defined(CONFIG_VGA_ARB) |
129 |
#include <linux/vgaarb.h> |
130 |
#endif |
131 |
|
132 |
#include <linux/spinlock.h> |
133 |
#if defined(NV_LINUX_SEMAPHORE_H_PRESENT) |
134 |
#include <linux/semaphore.h> |
135 |
#else |
136 |
#include <asm/semaphore.h> |
137 |
#endif |
138 |
#include <linux/completion.h> |
139 |
#include <linux/highmem.h> |
140 |
|
141 |
#ifdef CONFIG_PROC_FS |
142 |
#include <linux/proc_fs.h> |
143 |
#endif |
144 |
|
145 |
#ifdef CONFIG_MTRR |
146 |
#include <asm/mtrr.h> |
147 |
#endif |
148 |
|
149 |
#ifdef CONFIG_KDB |
150 |
#include <linux/kdb.h> |
151 |
#include <asm/kdb.h> |
152 |
#endif |
153 |
|
154 |
#if defined(CONFIG_X86_REMOTE_DEBUG) |
155 |
#include <linux/gdb.h> |
156 |
#endif |
157 |
|
158 |
#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE) |
159 |
#define AGPGART |
160 |
#include <linux/agp_backend.h> |
161 |
#include <linux/agpgart.h> |
162 |
#endif |
163 |
|
164 |
#if defined(NVCPU_X86) || defined(NVCPU_X86_64) |
165 |
#define NV_ENABLE_PAT_SUPPORT |
166 |
#endif |
167 |
|
168 |
#define NV_PAT_MODE_DISABLED 0 |
169 |
#define NV_PAT_MODE_KERNEL 1 |
170 |
#define NV_PAT_MODE_BUILTIN 2 |
171 |
|
172 |
extern int nv_pat_mode; |
173 |
|
174 |
#if defined(CONFIG_HOTPLUG_CPU) |
175 |
#include <linux/cpu.h> /* CPU hotplug support */ |
176 |
#include <linux/notifier.h> /* struct notifier_block, etc */ |
177 |
#endif |
178 |
|
179 |
#if (defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)) |
180 |
#include <linux/i2c.h> |
181 |
#endif |
182 |
|
183 |
#if defined(KERNEL_2_4) || \ |
184 |
(NV_ACPI_WALK_NAMESPACE_ARGUMENT_COUNT == 6) |
185 |
#define NV_ACPI_WALK_NAMESPACE(type, args...) acpi_walk_namespace(type, args) |
186 |
#elif (NV_ACPI_WALK_NAMESPACE_ARGUMENT_COUNT == 7) |
187 |
#define NV_ACPI_WALK_NAMESPACE(type, start_object, max_depth, \ |
188 |
user_function, args...) \ |
189 |
acpi_walk_namespace(type, start_object, max_depth, \ |
190 |
user_function, NULL, args) |
191 |
#else |
192 |
#error "NV_ACPI_WALK_NAMESPACE_ARGUMENT_COUNT value unrecognized!" |
193 |
#endif |
194 |
|
195 |
#if defined(CONFIG_PREEMPT_RT) |
196 |
typedef atomic_spinlock_t nv_spinlock_t; |
197 |
#define NV_SPIN_LOCK_INIT(lock) atomic_spin_lock_init(lock) |
198 |
#define NV_SPIN_LOCK_IRQ(lock) atomic_spin_lock_irq(lock) |
199 |
#define NV_SPIN_UNLOCK_IRQ(lock) atomic_spin_unlock_irq(lock) |
200 |
#define NV_SPIN_LOCK_IRQSAVE(lock,flags) atomic_spin_lock_irqsave(lock,flags) |
201 |
#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) \ |
202 |
atomic_spin_unlock_irqrestore(lock,flags) |
203 |
#define NV_SPIN_LOCK(lock) atomic_spin_lock(lock) |
204 |
#define NV_SPIN_UNLOCK(lock) atomic_spin_unlock(lock) |
205 |
#define NV_SPIN_UNLOCK_WAIT(lock) atomic_spin_unlock_wait(lock) |
206 |
#else |
207 |
typedef spinlock_t nv_spinlock_t; |
208 |
#define NV_SPIN_LOCK_INIT(lock) spin_lock_init(lock) |
209 |
#define NV_SPIN_LOCK_IRQ(lock) spin_lock_irq(lock) |
210 |
#define NV_SPIN_UNLOCK_IRQ(lock) spin_unlock_irq(lock) |
211 |
#define NV_SPIN_LOCK_IRQSAVE(lock,flags) spin_lock_irqsave(lock,flags) |
212 |
#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) spin_unlock_irqrestore(lock,flags) |
213 |
#define NV_SPIN_LOCK(lock) spin_lock(lock) |
214 |
#define NV_SPIN_UNLOCK(lock) spin_unlock(lock) |
215 |
#define NV_SPIN_UNLOCK_WAIT(lock) spin_unlock_wait(lock) |
216 |
#endif |
217 |
|
218 |
/*
 * x86-32 control register and cache helpers.  These fall back to inline
 * assembly only when the kernel does not already provide the accessors.
 * Note: no trailing semicolons inside the definitions, so each macro
 * expands to exactly one statement and composes safely with if/else.
 */
#if defined(NVCPU_X86)
#ifndef write_cr4
/* Load a new CR4 value. */
#define write_cr4(x) __asm__ ("movl %0,%%cr4" :: "r" (x))
#endif

#ifndef read_cr4
/* Read CR4 via a GNU statement expression so it can be used as a value. */
#define read_cr4()                                  \
    ({                                              \
        unsigned int __cr4;                         \
        __asm__ ("movl %%cr4,%0" : "=r" (__cr4));   \
        __cr4;                                      \
    })
#endif

#ifndef wbinvd
/* Write back and invalidate all CPU caches. */
#define wbinvd() __asm__ __volatile__("wbinvd" ::: "memory")
#endif
#endif /* defined(NVCPU_X86) */
236 |
|
237 |
#ifndef get_cpu |
238 |
#define get_cpu() smp_processor_id() |
239 |
#define put_cpu() |
240 |
#endif |
241 |
|
242 |
#if !defined(unregister_hotcpu_notifier) |
243 |
#define unregister_hotcpu_notifier unregister_cpu_notifier |
244 |
#endif |
245 |
#if !defined(register_hotcpu_notifier) |
246 |
#define register_hotcpu_notifier register_cpu_notifier |
247 |
#endif |
248 |
|
249 |
#if !defined (list_for_each) |
250 |
#define list_for_each(pos, head) \ |
251 |
for (pos = (head)->next; pos != (head); pos = (pos)->next) |
252 |
#endif |
253 |
|
254 |
#if !defined(pmd_large) |
255 |
#define pmd_large(_pmd) \ |
256 |
((pmd_val(_pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) |
257 |
#endif |
258 |
|
259 |
#if !defined(page_count) && defined(KERNEL_2_4) |
260 |
#define page_count(page) (atomic_read(&(page)->count)) |
261 |
#endif |
262 |
|
263 |
#define NV_GET_PAGE_COUNT(page_ptr) \ |
264 |
(unsigned int)page_count(NV_GET_PAGE_STRUCT(page_ptr->phys_addr)) |
265 |
|
266 |
#if !defined(__GFP_COMP) |
267 |
#define __GFP_COMP 0 |
268 |
#endif |
269 |
|
270 |
#if !defined(DEBUG) && defined(__GFP_NOWARN) |
271 |
#define NV_GFP_KERNEL (GFP_KERNEL | __GFP_NOWARN) |
272 |
#define NV_GFP_ATOMIC (GFP_ATOMIC | __GFP_NOWARN) |
273 |
#else |
274 |
#define NV_GFP_KERNEL (GFP_KERNEL) |
275 |
#define NV_GFP_ATOMIC (GFP_ATOMIC) |
276 |
#endif |
277 |
|
278 |
#if defined(GFP_DMA32) |
279 |
/* |
280 |
* GFP_DMA32 is similar to GFP_DMA, but instructs the Linux zone |
281 |
* allocator to allocate memory from the first 4GB on platforms |
282 |
* such as Linux/x86-64; the alternative is to use an IOMMU such |
283 |
* as the one implemented with the K8 GART, if available. |
284 |
*/ |
285 |
#define NV_GFP_DMA32 (NV_GFP_KERNEL | GFP_DMA32) |
286 |
#else |
287 |
#define NV_GFP_DMA32 (NV_GFP_KERNEL) |
288 |
#endif |
289 |
|
290 |
#define CACHE_FLUSH() asm volatile("wbinvd":::"memory") |
291 |
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory") |
292 |
|
293 |
#if !defined(IRQF_SHARED) |
294 |
#define IRQF_SHARED SA_SHIRQ |
295 |
#endif |
296 |
|
297 |
#define NV_MAX_RECURRING_WARNING_MESSAGES 10 |
298 |
|
299 |
/* add support for iommu. |
300 |
* On the x86-64 platform, the driver may need to remap system |
301 |
* memory pages via AMD K8/Intel VT-d IOMMUs if a given |
302 |
 * GPU's addressing capabilities are limited such that it can |
303 |
* not access the original page directly. Examples of this |
304 |
* are legacy PCI-E devices. |
305 |
*/ |
306 |
#if (defined(NVCPU_X86_64) && !defined(GFP_DMA32)) || defined(CONFIG_DMAR) |
307 |
#define NV_SG_MAP_BUFFERS 1 |
308 |
extern int nv_swiotlb; |
309 |
|
310 |
#if defined(CONFIG_DMAR) |
311 |
#define NV_INTEL_IOMMU 1 |
312 |
#else |
313 |
/* |
314 |
* Limit use of IOMMU/SWIOTLB space to 60 MB, leaving 4 MB for the rest of |
315 |
* the system (assuming a 64 MB IOMMU/SWIOTLB). |
316 |
* This is not required if Intel VT-d IOMMU is used to remap pages. |
317 |
*/ |
318 |
#define NV_NEED_REMAP_CHECK 1 |
319 |
#define NV_REMAP_LIMIT_DEFAULT (60 * 1024 * 1024) |
320 |
#endif |
321 |
#endif |
322 |
|
323 |
/* add support for software i/o tlb support. |
324 |
* normally, you'd expect this to be transparent, but unfortunately this is not |
325 |
* the case. for starters, the sw io tlb is a pool of pre-allocated pages that |
326 |
* are < 32-bits. when we ask to remap a page through this sw io tlb, we are |
327 |
* returned one of these pages, which means we have 2 different pages, rather |
328 |
* than 2 mappings to the same page. secondly, this pre-allocated pool is very |
329 |
* tiny, and the kernel panics when it is exhausted. try to warn the user that |
330 |
* they need to boost the size of their pool. |
331 |
*/ |
332 |
#if defined(CONFIG_SWIOTLB) && !defined(GFP_DMA32) |
333 |
#define NV_SWIOTLB 1 |
334 |
#endif |
335 |
|
336 |
/* |
337 |
* early 2.6 kernels changed their swiotlb codepath, running into a |
338 |
* latent bug that returns virtual addresses when it should return |
339 |
* physical addresses. we try to gracefully account for that, by |
340 |
 * comparing the returned address to what should be its virtual |
341 |
* equivalent. this should hopefully account for when the bug is |
342 |
* fixed in the core kernel. |
343 |
*/ |
344 |
#if defined(NV_SWIOTLB) && !defined(KERNEL_2_4) |
345 |
#define NV_FIXUP_SWIOTLB_VIRT_ADDR_BUG(dma_addr) \ |
346 |
if ((dma_addr) == ((dma_addr) | PAGE_OFFSET)) \ |
347 |
(dma_addr) = __pa((dma_addr)) |
348 |
#else |
349 |
#define NV_FIXUP_SWIOTLB_VIRT_ADDR_BUG(dma_addr) |
350 |
#endif |
351 |
|
352 |
/*
 * Zero-initialize a scatterlist table.  Newer kernels provide
 * sg_init_table(); older ones get an equivalent memset().  Neither
 * expansion carries a trailing semicolon, so the macro behaves like a
 * normal function-call statement (the stray ';' previously left in the
 * memset fallback produced an empty extra statement and broke if/else
 * usage).
 */
#if !defined(KERNEL_2_4) && defined(NV_SG_INIT_TABLE_PRESENT)
#define NV_SG_INIT_TABLE(sgl, nents) sg_init_table(sgl, nents)
#else
#define NV_SG_INIT_TABLE(sgl, nents) \
    memset(sgl, 0, (sizeof(*(sgl)) * (nents)))
#endif
359 |
|
360 |
#ifndef NVWATCH |
361 |
|
362 |
/* various memory tracking/debugging techniques |
363 |
* disabled for retail builds, enabled for debug builds |
364 |
*/ |
365 |
|
366 |
// allow an easy way to convert all debug printfs related to memory |
367 |
// management back and forth between 'info' and 'errors' |
368 |
#if defined(NV_DBG_MEM) |
369 |
#define NV_DBG_MEMINFO NV_DBG_ERRORS |
370 |
#else |
371 |
#define NV_DBG_MEMINFO NV_DBG_INFO |
372 |
#endif |
373 |
|
374 |
/* Memory usage tracking is only compiled into debug builds. */
#ifdef DEBUG
#define NV_ENABLE_MEM_TRACKING 1
#endif

#if defined(NV_ENABLE_MEM_TRACKING)
/* Grow an allocation request so a size header can be stashed in front. */
#define NV_MEM_TRACKING_PAD_SIZE(size) ((size) += sizeof(void *))
/* Store 'size' in the hidden header and advance *ptr past it.
 * Wrapped in do/while(0) so the macro is a single statement and is
 * immune to the dangling-else hazard of a bare 'if' macro. */
#define NV_MEM_TRACKING_HIDE_SIZE(ptr, size)            \
    do                                                  \
    {                                                   \
        if ((ptr) && *(ptr))                            \
        {                                               \
            U008 *__ptr;                                \
            *(unsigned long *) *(ptr) = (size);         \
            __ptr = *(ptr); __ptr += sizeof(void *);    \
            *(ptr) = (void *) __ptr;                    \
        }                                               \
    } while (0)
/* Step 'ptr' back to the hidden header and read the stored size. */
#define NV_MEM_TRACKING_RETRIEVE_SIZE(ptr, size)        \
    do                                                  \
    {                                                   \
        U008 *__ptr = (ptr); __ptr -= sizeof(void *);   \
        (ptr) = (void *) __ptr;                         \
        size = *(unsigned long *) (ptr);                \
    } while (0)
#else
/* Retail builds: tracking compiles away; RETRIEVE still zeroes 'size'. */
#define NV_MEM_TRACKING_PAD_SIZE(size)
#define NV_MEM_TRACKING_HIDE_SIZE(ptr, size)
#define NV_MEM_TRACKING_RETRIEVE_SIZE(ptr, size) ((size) = 0)
#endif
398 |
|
399 |
|
400 |
/* poor man's memory allocation tracker. |
401 |
* main intention is just to see how much memory is being used to recognize |
402 |
* when memory usage gets out of control or if memory leaks are happening |
403 |
*/ |
404 |
|
405 |
/* keep track of memory usage */ |
406 |
#if defined(NV_ENABLE_MEM_TRACKING) |
407 |
|
408 |
/* print out a running tally of memory allocation amounts, disabled by default */ |
409 |
// #define POOR_MANS_MEM_CHECK 1 |
410 |
|
411 |
|
412 |
/* slightly more advanced memory allocation tracker. |
413 |
* track who's allocating memory and print out a list of currently allocated |
414 |
* memory at key points in the driver |
415 |
*/ |
416 |
|
417 |
#define MEMDBG_ALLOC(a,b) (a = kmalloc(b, NV_GFP_ATOMIC)) |
418 |
#define MEMDBG_FREE(a) (kfree(a)) |
419 |
|
420 |
#include "nv-memdbg.h" |
421 |
|
422 |
#undef MEMDBG_ALLOC |
423 |
#undef MEMDBG_FREE |
424 |
|
425 |
/* print out list of memory allocations */ |
426 |
/* default to enabled for now */ |
427 |
#define LIST_MEM_CHECK 1 |
428 |
|
429 |
/* decide which memory types to apply mem trackers to */ |
430 |
#define VM_CHECKER 1 |
431 |
#define KM_CHECKER 1 |
432 |
|
433 |
#endif /* NV_ENABLE_MEM_TRACKING */ |
434 |
|
435 |
#if defined(VM_CHECKER) |
436 |
/* kernel virtual memory usage/allocation information */ |
437 |
extern U032 vm_usage; |
438 |
extern struct mem_track_t *vm_list; |
439 |
extern nv_spinlock_t vm_lock; |
440 |
|
441 |
# if defined(POOR_MANS_MEM_CHECK) |
442 |
# define VM_PRINT(str, args...) printk(str, ##args) |
443 |
# else |
444 |
# define VM_PRINT(str, args...) |
445 |
# endif |
446 |
# if defined(LIST_MEM_CHECK) |
447 |
# define VM_ADD_MEM(a,b,c,d) nv_add_mem(&vm_list, a, b, c, d) |
448 |
# define VM_FREE_MEM(a,b,c,d) nv_free_mem(&vm_list, a, b, c, d) |
449 |
# else |
450 |
# define VM_ADD_MEM(a,b,c,d) |
451 |
# define VM_FREE_MEM(a,b,c,d) |
452 |
# endif |
453 |
# define VM_ALLOC_RECORD(ptr, size, name) \ |
454 |
if (ptr != NULL) \ |
455 |
{ \ |
456 |
NV_SPIN_LOCK(&vm_lock); \ |
457 |
vm_usage += size; \ |
458 |
VM_PRINT("NVRM: %s (0x%p: 0x%x): VM usage is now 0x%x bytes\n", \ |
459 |
name, (void *)ptr, size, vm_usage); \ |
460 |
VM_ADD_MEM(ptr, size, __FILE__, __LINE__); \ |
461 |
NV_SPIN_UNLOCK(&vm_lock); \ |
462 |
} |
463 |
# define VM_FREE_RECORD(ptr, size, name) \ |
464 |
if (ptr != NULL) \ |
465 |
{ \ |
466 |
NV_SPIN_LOCK(&vm_lock); \ |
467 |
vm_usage -= size; \ |
468 |
VM_PRINT("NVRM: %s (0x%p: 0x%x): VM usage is now 0x%x bytes\n", \ |
469 |
name, (void *)ptr, size, vm_usage); \ |
470 |
VM_FREE_MEM(ptr, size, __FILE__, __LINE__); \ |
471 |
NV_SPIN_UNLOCK(&vm_lock); \ |
472 |
} |
473 |
#else |
474 |
# define VM_ALLOC_RECORD(a,b,c) |
475 |
# define VM_FREE_RECORD(a,b,c) |
476 |
#endif |
477 |
|
478 |
#if defined(KM_CHECKER) |
479 |
/* kernel logical memory usage/allocation information */ |
480 |
extern U032 km_usage; |
481 |
extern struct mem_track_t *km_list; |
482 |
extern nv_spinlock_t km_lock; |
483 |
|
484 |
# if defined(POOR_MANS_MEM_CHECK) |
485 |
# define KM_PRINT(str, args...) printk(str, ##args) |
486 |
# else |
487 |
# define KM_PRINT(str, args...) |
488 |
# endif |
489 |
# if defined(LIST_MEM_CHECK) |
490 |
# define KM_ADD_MEM(a,b,c,d) nv_add_mem(&km_list, a, b, c, d) |
491 |
# define KM_FREE_MEM(a,b,c,d) nv_free_mem(&km_list, a, b, c, d) |
492 |
# else |
493 |
# define KM_ADD_MEM(a,b,c,d) |
494 |
# define KM_FREE_MEM(a,b,c,d) |
495 |
# endif |
496 |
# define KM_ALLOC_RECORD(ptr, size, name) \ |
497 |
if (ptr != NULL) \ |
498 |
{ \ |
499 |
unsigned long __eflags; \ |
500 |
NV_SPIN_LOCK_IRQSAVE(&km_lock, __eflags); \ |
501 |
km_usage += size; \ |
502 |
KM_PRINT("NVRM: %s (0x%p: 0x%x): KM usage is now 0x%x bytes\n", \ |
503 |
name, (void *)ptr, size, km_usage); \ |
504 |
KM_ADD_MEM(ptr, size, __FILE__, __LINE__); \ |
505 |
NV_SPIN_UNLOCK_IRQRESTORE(&km_lock, __eflags); \ |
506 |
} |
507 |
# define KM_FREE_RECORD(ptr, size, name) \ |
508 |
if (ptr != NULL) \ |
509 |
{ \ |
510 |
unsigned long __eflags; \ |
511 |
NV_SPIN_LOCK_IRQSAVE(&km_lock, __eflags); \ |
512 |
km_usage -= size; \ |
513 |
KM_PRINT("NVRM: %s (0x%p: 0x%x): KM usage is now 0x%x bytes\n", \ |
514 |
name, (void *)ptr, size, km_usage); \ |
515 |
KM_FREE_MEM(ptr, size, __FILE__, __LINE__); \ |
516 |
NV_SPIN_UNLOCK_IRQRESTORE(&km_lock, __eflags); \ |
517 |
} |
518 |
#else |
519 |
# define KM_ALLOC_RECORD(a,b,c) |
520 |
# define KM_FREE_RECORD(a,b,c) |
521 |
#endif |
522 |
|
523 |
/*
 * Kernel memory allocation / mapping wrappers.  Each wrapper pairs the
 * underlying kernel call with the driver's memory-usage tracking
 * (VM_*_RECORD / KM_*_RECORD, which compile away in retail builds).
 *
 * All multi-statement macros are wrapped in do { } while (0) so they
 * expand to a single statement and compose safely with if/else
 * (the previous bare-brace form broke 'if (c) NV_KFREE(p, s); else').
 */
#define NV_VMALLOC(ptr, size, cached)                                       \
    do                                                                      \
    {                                                                       \
        /* uncached mappings get PAGE_KERNEL_NOCACHE protection */          \
        pgprot_t __prot = (cached) ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE;     \
        (ptr) = __vmalloc(size, GFP_KERNEL, __prot);                        \
        VM_ALLOC_RECORD(ptr, size, "vm_vmalloc");                           \
    } while (0)

#define NV_VFREE(ptr, size)                                                 \
    do                                                                      \
    {                                                                       \
        VM_FREE_RECORD(ptr, size, "vm_vfree");                              \
        vfree((void *) (ptr));                                              \
    } while (0)

#define NV_IOREMAP(ptr, physaddr, size)                                     \
    do                                                                      \
    {                                                                       \
        (ptr) = ioremap(physaddr, size);                                    \
        VM_ALLOC_RECORD(ptr, size, "vm_ioremap");                           \
    } while (0)

#define NV_IOREMAP_NOCACHE(ptr, physaddr, size)                             \
    do                                                                      \
    {                                                                       \
        (ptr) = ioremap_nocache(physaddr, size);                            \
        VM_ALLOC_RECORD(ptr, size, "vm_ioremap_nocache");                   \
    } while (0)

#if defined(NV_IOREMAP_CACHE_PRESENT)
#define NV_IOREMAP_CACHE(ptr, physaddr, size)                               \
    do                                                                      \
    {                                                                       \
        (ptr) = ioremap_cache(physaddr, size);                              \
        VM_ALLOC_RECORD(ptr, size, "vm_ioremap_cache");                     \
    } while (0)
#else
/* no ioremap_cache(): fall back to a plain ioremap() mapping */
#define NV_IOREMAP_CACHE NV_IOREMAP
#endif

#if defined(NV_IOREMAP_WC_PRESENT)
#define NV_IOREMAP_WC(ptr, physaddr, size)                                  \
    do                                                                      \
    {                                                                       \
        (ptr) = ioremap_wc(physaddr, size);                                 \
        VM_ALLOC_RECORD(ptr, size, "vm_ioremap_wc");                        \
    } while (0)
#else
/* no write-combining support: fall back to an uncached mapping */
#define NV_IOREMAP_WC NV_IOREMAP_NOCACHE
#endif

#define NV_IOUNMAP(ptr, size)                                               \
    do                                                                      \
    {                                                                       \
        VM_FREE_RECORD(ptr, size, "vm_iounmap");                            \
        iounmap(ptr);                                                       \
    } while (0)

/* only use this because GFP_KERNEL may sleep..
 * GFP_ATOMIC is ok, it won't sleep
 */
#define NV_KMALLOC(ptr, size)                                               \
    do                                                                      \
    {                                                                       \
        (ptr) = kmalloc(size, NV_GFP_KERNEL);                               \
        KM_ALLOC_RECORD(ptr, size, "km_alloc");                             \
    } while (0)

#define NV_KMALLOC_ATOMIC(ptr, size)                                        \
    do                                                                      \
    {                                                                       \
        (ptr) = kmalloc(size, NV_GFP_ATOMIC);                               \
        KM_ALLOC_RECORD(ptr, size, "km_alloc_atomic");                      \
    } while (0)

#define NV_KFREE(ptr, size)                                                 \
    do                                                                      \
    {                                                                       \
        KM_FREE_RECORD(ptr, size, "km_free");                               \
        kfree((void *) (ptr));                                              \
    } while (0)

#define NV_GET_FREE_PAGES(ptr, order, gfp_mask)                             \
    do                                                                      \
    {                                                                       \
        (ptr) = __get_free_pages(gfp_mask, order);                          \
    } while (0)

#define NV_FREE_PAGES(ptr, order)                                           \
    do                                                                      \
    {                                                                       \
        free_pages(ptr, order);                                             \
    } while (0)
605 |
|
606 |
#if defined(NV_KMEM_CACHE_CREATE_PRESENT) |
607 |
#if (NV_KMEM_CACHE_CREATE_ARGUMENT_COUNT == 6) |
608 |
#define NV_KMEM_CACHE_CREATE(kmem_cache, name, type) \ |
609 |
{ \ |
610 |
kmem_cache = kmem_cache_create(name, sizeof(type), \ |
611 |
0, 0, NULL, NULL); \ |
612 |
} |
613 |
#elif (NV_KMEM_CACHE_CREATE_ARGUMENT_COUNT == 5) |
614 |
#define NV_KMEM_CACHE_CREATE(kmem_cache, name, type) \ |
615 |
{ \ |
616 |
kmem_cache = kmem_cache_create(name, sizeof(type), \ |
617 |
0, 0, NULL); \ |
618 |
} |
619 |
#else |
620 |
#error "NV_KMEM_CACHE_CREATE_ARGUMENT_COUNT value unrecognized!" |
621 |
#endif |
622 |
#define NV_KMEM_CACHE_DESTROY(kmem_cache) \ |
623 |
{ \ |
624 |
kmem_cache_destroy(kmem_cache); \ |
625 |
kmem_cache = NULL; \ |
626 |
} |
627 |
#else |
628 |
#error "NV_KMEM_CACHE_CREATE() undefined (kmem_cache_create() unavailable)!" |
629 |
#endif |
630 |
|
631 |
#define NV_KMEM_CACHE_ALLOC(ptr, kmem_cache, type) \ |
632 |
{ \ |
633 |
(ptr) = kmem_cache_alloc(kmem_cache, GFP_KERNEL); \ |
634 |
} |
635 |
|
636 |
#define NV_KMEM_CACHE_FREE(ptr, type, kmem_cache) \ |
637 |
{ \ |
638 |
kmem_cache_free(kmem_cache, ptr); \ |
639 |
} |
640 |
|
641 |
#if defined(NV_VMAP_PRESENT) |
642 |
#if (NV_VMAP_ARGUMENT_COUNT == 2) |
643 |
#define NV_VMAP_KERNEL(ptr, pages, count, prot) \ |
644 |
{ \ |
645 |
(ptr) = (unsigned long)vmap(pages, count); \ |
646 |
VM_ALLOC_RECORD((void *)ptr, (count) * PAGE_SIZE, "vm_vmap"); \ |
647 |
} |
648 |
#elif (NV_VMAP_ARGUMENT_COUNT == 4) |
649 |
#ifndef VM_MAP |
650 |
#define VM_MAP 0 |
651 |
#endif |
652 |
#define NV_VMAP_KERNEL(ptr, pages, count, prot) \ |
653 |
{ \ |
654 |
(ptr) = (unsigned long)vmap(pages, count, VM_MAP, prot); \ |
655 |
VM_ALLOC_RECORD((void *)ptr, (count) * PAGE_SIZE, "vm_vmap"); \ |
656 |
} |
657 |
#else |
658 |
#error "NV_VMAP_ARGUMENT_COUNT value unrecognized!" |
659 |
#endif |
660 |
#else |
661 |
#if defined(NV_SG_MAP_BUFFERS) |
662 |
#error "NV_VMAP() undefined (vmap() unavailable)!" |
663 |
#endif |
664 |
#endif /* NV_VMAP_PRESENT */ |
665 |
|
666 |
#define NV_VUNMAP_KERNEL(ptr, count) \ |
667 |
{ \ |
668 |
VM_FREE_RECORD((void *)ptr, (count) * PAGE_SIZE, "vm_vunmap"); \ |
669 |
vunmap((void *)(ptr)); \ |
670 |
} |
671 |
|
672 |
#define NV_VMAP(addr, pages, count, cached) \ |
673 |
{ \ |
674 |
pgprot_t __prot = (cached) ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE; \ |
675 |
void *__ptr = nv_vmap(pages, count, __prot); \ |
676 |
(addr) = (unsigned long)__ptr; \ |
677 |
} |
678 |
|
679 |
#define NV_VUNMAP(addr, count) nv_vunmap((void *)addr, count) |
680 |
|
681 |
|
682 |
#endif /* !defined NVWATCH */ |
683 |
|
684 |
#if defined(NV_SMP_CALL_FUNCTION_PRESENT) |
685 |
#if (NV_SMP_CALL_FUNCTION_ARGUMENT_COUNT == 4) |
686 |
#define NV_SMP_CALL_FUNCTION(func, info, wait) \ |
687 |
({ \ |
688 |
int __ret = smp_call_function(func, info, 1, wait); \ |
689 |
__ret; \ |
690 |
}) |
691 |
#elif (NV_SMP_CALL_FUNCTION_ARGUMENT_COUNT == 3) |
692 |
#define NV_SMP_CALL_FUNCTION(func, info, wait) \ |
693 |
({ \ |
694 |
int __ret = smp_call_function(func, info, wait); \ |
695 |
__ret; \ |
696 |
}) |
697 |
#else |
698 |
#error "NV_SMP_CALL_FUNCTION_ARGUMENT_COUNT value unrecognized!" |
699 |
#endif |
700 |
#elif defined(CONFIG_SMP) |
701 |
#error "NV_SMP_CALL_FUNCTION() undefined (smp_call_function() unavailable)!" |
702 |
#endif |
703 |
|
704 |
#if defined(NV_ON_EACH_CPU_PRESENT) |
705 |
#if (NV_ON_EACH_CPU_ARGUMENT_COUNT == 4) |
706 |
#define NV_ON_EACH_CPU(func, info, wait) \ |
707 |
({ \ |
708 |
int __ret = on_each_cpu(func, info, 1, wait); \ |
709 |
__ret; \ |
710 |
}) |
711 |
#elif (NV_ON_EACH_CPU_ARGUMENT_COUNT == 3) |
712 |
#define NV_ON_EACH_CPU(func, info, wait) \ |
713 |
({ \ |
714 |
int __ret = on_each_cpu(func, info, wait); \ |
715 |
__ret; \ |
716 |
}) |
717 |
#else |
718 |
#error "NV_ON_EACH_CPU_ARGUMENT_COUNT value unrecognized!" |
719 |
#endif |
720 |
#elif !defined(KERNEL_2_4) && defined(CONFIG_SMP) |
721 |
#error "NV_ON_EACH_CPU() undefined (on_each_cpu() unavailable)!" |
722 |
#endif |
723 |
|
724 |
/*
 * Run 'func(info)' on every CPU in the system.
 *
 * Uniprocessor kernels simply invoke the callback locally.  On 2.4 SMP
 * kernels, smp_call_function() reaches the other CPUs and the callback
 * is then invoked by hand on the local CPU (with preemption disabled
 * where the kernel provides preempt_disable()).  Later SMP kernels use
 * on_each_cpu(), which covers the local CPU itself.
 *
 * Returns 0 on success, or the cross-call helper's error code.
 */
static inline int nv_execute_on_all_cpus(void (*func)(void *info), void *info)
{
    int status = 0;
#if !defined(CONFIG_SMP)
    func(info);
#elif defined(KERNEL_2_4)
#if defined(preempt_disable)
    preempt_disable();
#endif
    status = NV_SMP_CALL_FUNCTION(func, info, 1);
    func(info);
#if defined(preempt_enable)
    preempt_enable();
#endif
#else
    status = NV_ON_EACH_CPU(func, info, 1);
#endif
    return status;
}
743 |
|
744 |
#if defined(CONFIG_PREEMPT_RT) |
745 |
#define NV_INIT_MUTEX(mutex) semaphore_init(mutex) |
746 |
#else |
747 |
#define NV_INIT_MUTEX(mutex) \ |
748 |
{ \ |
749 |
struct semaphore __mutex = \ |
750 |
__SEMAPHORE_INITIALIZER(*(mutex), 1); \ |
751 |
*(mutex) = __mutex; \ |
752 |
} |
753 |
#endif |
754 |
|
755 |
#if defined (KERNEL_2_4) |
756 |
# define NV_IS_SUSER() suser() |
757 |
# define NV_PCI_DEVICE_NAME(dev) ((dev)->name) |
758 |
# define NV_NUM_CPUS() smp_num_cpus |
759 |
# define NV_CLI() __cli() |
760 |
# define NV_SAVE_FLAGS(eflags) __save_flags(eflags) |
761 |
# define NV_RESTORE_FLAGS(eflags) __restore_flags(eflags) |
762 |
# define NV_MAY_SLEEP() (!in_interrupt()) |
763 |
# define NV_MODULE_PARAMETER(x) MODULE_PARM(x, "i") |
764 |
# define NV_MODULE_STRING_PARAMETER(x) MODULE_PARM(x, "s") |
765 |
#endif |
766 |
|
767 |
#if !defined(KERNEL_2_4) |
768 |
# define NV_IS_SUSER() capable(CAP_SYS_ADMIN) |
769 |
# define NV_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) |
770 |
# define NV_NUM_CPUS() num_online_cpus() |
771 |
# define NV_CLI() local_irq_disable() |
772 |
# define NV_SAVE_FLAGS(eflags) local_save_flags(eflags) |
773 |
# define NV_RESTORE_FLAGS(eflags) local_irq_restore(eflags) |
774 |
# define NV_MAY_SLEEP() (!irqs_disabled() && !in_interrupt() && !in_atomic()) |
775 |
# define NV_MODULE_PARAMETER(x) module_param(x, int, 0) |
776 |
# define NV_MODULE_STRING_PARAMETER(x) module_param(x, charp, 0) |
777 |
# undef MODULE_PARM |
778 |
#endif |
779 |
|
780 |
#if defined(NV_SIGNAL_STRUCT_HAS_RLIM) |
781 |
/* per-process rlimit settings */ |
782 |
#define NV_TASK_STRUCT_RLIM(current) ((current)->signal->rlim) |
783 |
#else |
784 |
/* per-thread rlimit settings */ |
785 |
#define NV_TASK_STRUCT_RLIM(current) ((current)->rlim) |
786 |
#endif |
787 |
|
788 |
#define NV_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) |
789 |
#define NV_VMA_PGOFF(vma) ((vma)->vm_pgoff) |
790 |
#define NV_VMA_SIZE(vma) ((vma)->vm_end - (vma)->vm_start) |
791 |
#define NV_VMA_OFFSET(vma) (((NvU64)(vma)->vm_pgoff) << PAGE_SHIFT) |
792 |
#define NV_VMA_PRIVATE(vma) ((vma)->vm_private_data) |
793 |
#define NV_VMA_FILE(vma) ((vma)->vm_file) |
794 |
|
795 |
#define NV_DEVICE_NUMBER(x) minor((x)->i_rdev) |
796 |
#define NV_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255) |
797 |
|
798 |
#define NV_PCI_DISABLE_DEVICE(dev) \ |
799 |
{ \ |
800 |
NvU16 __cmd[2]; \ |
801 |
pci_read_config_word((dev), PCI_COMMAND, &__cmd[0]); \ |
802 |
pci_disable_device(dev); \ |
803 |
pci_read_config_word((dev), PCI_COMMAND, &__cmd[1]); \ |
804 |
__cmd[1] |= PCI_COMMAND_MEMORY; \ |
805 |
pci_write_config_word((dev), PCI_COMMAND, \ |
806 |
(__cmd[1] | (__cmd[0] & PCI_COMMAND_IO))); \ |
807 |
} |
808 |
|
809 |
#define NV_PCI_RESOURCE_START(dev, bar) pci_resource_start(dev, (bar)) |
810 |
#define NV_PCI_RESOURCE_SIZE(dev, bar) pci_resource_len(dev, (bar)) |
811 |
#define NV_PCI_RESOURCE_FLAGS(dev, bar) pci_resource_flags(dev, (bar)) |
812 |
#define NV_PCI_RESOURCE_VALID(dev, bar) \ |
813 |
(NV_PCI_RESOURCE_START(dev, bar) != 0 && NV_PCI_RESOURCE_SIZE(dev, bar) != 0) |
814 |
|
815 |
#define NV_PCI_BUS_NUMBER(dev) (dev)->bus->number |
816 |
#define NV_PCI_DEVFN(dev) (dev)->devfn |
817 |
#define NV_PCI_SLOT_NUMBER(dev) PCI_SLOT(NV_PCI_DEVFN(dev)) |
818 |
|
819 |
#if defined(NV_PCI_GET_CLASS_PRESENT) |
820 |
#define NV_PCI_DEV_PUT(dev) pci_dev_put(dev) |
821 |
#define NV_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) |
822 |
#define NV_PCI_GET_SLOT(bus,devfn) \ |
823 |
({ \ |
824 |
struct pci_dev *__dev = NULL; \ |
825 |
while ((__dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, __dev))) \ |
826 |
{ \ |
827 |
if (NV_PCI_BUS_NUMBER(__dev) == bus \ |
828 |
&& NV_PCI_DEVFN(__dev) == devfn) break; \ |
829 |
} \ |
830 |
__dev; \ |
831 |
}) |
832 |
#define NV_PCI_GET_CLASS(class,from) pci_get_class(class,from) |
833 |
#else |
834 |
#define NV_PCI_DEV_PUT(dev) |
835 |
#define NV_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) |
836 |
#define NV_PCI_GET_SLOT(bus,devfn) pci_find_slot(bus,devfn) |
837 |
#define NV_PCI_GET_CLASS(class,from) pci_find_class(class,from) |
838 |
#endif |
839 |
|
840 |
#define NV_PRINT_AT(nv_debug_level,at) \ |
841 |
{ \ |
842 |
nv_printf(nv_debug_level, \ |
843 |
"NVRM: VM: %s: 0x%p, %d page(s), count = %d, flags = 0x%08x, 0x%p, 0x%p\n", \ |
844 |
__FUNCTION__, at, at->num_pages, NV_ATOMIC_READ(at->usage_count), \ |
845 |
at->flags, at->key_mapping, at->page_table); \ |
846 |
} |
847 |
|
848 |
#define NV_PRINT_VMA(nv_debug_level,vma) \ |
849 |
{ \ |
850 |
nv_printf(nv_debug_level, \ |
851 |
"NVRM: VM: %s: 0x%lx - 0x%lx, 0x%08x bytes @ 0x%016llx, 0x%p, 0x%p\n", \ |
852 |
__FUNCTION__, vma->vm_start, vma->vm_end, NV_VMA_SIZE(vma), \ |
853 |
NV_VMA_OFFSET(vma), NV_VMA_PRIVATE(vma), NV_VMA_FILE(vma)); \ |
854 |
} |
855 |
|
856 |
/* |
857 |
* On Linux 2.6, we support both APM and ACPI power management. On Linux |
858 |
* 2.4, we support APM, only. ACPI support has been back-ported to the |
859 |
* Linux 2.4 kernel, but the Linux 2.4 driver model is not sufficient for |
860 |
* full ACPI support: it may work with some systems, but not reliably |
861 |
* enough for us to officially support this configuration. |
862 |
* |
863 |
 * We support two Linux kernel power management interfaces: the original |
864 |
* pm_register()/pm_unregister() on Linux 2.4 and the device driver model |
865 |
* backed PCI driver power management callbacks introduced with Linux |
866 |
* 2.6. |
867 |
* |
868 |
* The code below determines which interface to support on this kernel |
869 |
* version, if any; if built for Linux 2.6, it will also determine if the |
870 |
* kernel comes with ACPI or APM power management support. |
871 |
*/ |
872 |
/*
 * Linux 2.6 with CONFIG_PM: use the device-driver-model (PCI driver)
 * power management callbacks described in the comment above.
 */
#if !defined(KERNEL_2_4) && defined(CONFIG_PM)
#define NV_PM_SUPPORT_DEVICE_DRIVER_MODEL
/* APM support (built-in or modular) without ACPI: take the new-style
 * APM path on these 2.6 kernels */
#if (defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)) && !defined(CONFIG_ACPI)
#define NV_PM_SUPPORT_NEW_STYLE_APM
#endif
#endif
878 |
|
879 |
/* |
880 |
* On Linux 2.6 kernels >= 2.6.11, the PCI subsystem provides a new |
881 |
* interface that allows PCI drivers to determine the correct power state |
882 |
* for a given system power state; our suspend/resume callbacks now use |
883 |
* this interface and operate on PCI power state defines. |
884 |
* |
885 |
* Define these new PCI power state #define's here for compatibility with |
886 |
* older Linux 2.6 kernels. |
887 |
*/ |
888 |
/* Compatibility with Linux 2.6 kernels that predate the PCI power state
 * defines (see comment above): map them onto the system suspend states. */
#if !defined(KERNEL_2_4) && !defined(PCI_D0)
#define PCI_D0 PM_SUSPEND_ON
#define PCI_D3hot PM_SUSPEND_MEM
#endif

/* pm_message_t was introduced during 2.6 development; older 2.6 kernels
 * passed a plain u32 to the suspend callbacks */
#if !defined(KERNEL_2_4) && !defined(NV_PM_MESSAGE_T_PRESENT)
typedef u32 pm_message_t;
#endif

/* Linux 2.4 with APM: use the original pm_register()/pm_unregister()
 * interface from <linux/pm.h> */
#if defined(KERNEL_2_4) && (defined(CONFIG_APM) || defined(CONFIG_APM_MODULE))
#include <linux/pm.h>
#define NV_PM_SUPPORT_OLD_STYLE_APM
#endif
901 |
|
902 |
/* older kernels only provide the uppercase MINOR() macro */
#ifndef minor
# define minor(x) MINOR(x)
#endif

/* busy-wait hint: cpu_relax() where available, otherwise just a
 * compiler barrier */
#if defined(cpu_relax)
#define NV_CPU_RELAX() cpu_relax()
#else
#define NV_CPU_RELAX() barrier()
#endif

/* pre-irqreturn_t kernels: interrupt handlers return void and
 * IRQ_RETVAL() expands to nothing */
#ifndef IRQ_RETVAL
typedef void irqreturn_t;
#define IRQ_RETVAL(a)
#endif

/* PCI Express capability ID; missing from some older kernel headers */
#ifndef PCI_CAP_ID_EXP
#define PCI_CAP_ID_EXP 0x10
#endif
920 |
|
921 |
/* map a single page into a user vma on kernels with vm_insert_page() */
#if defined(NV_VM_INSERT_PAGE_PRESENT)
#define NV_VM_INSERT_PAGE(vma, addr, page) \
    vm_insert_page(vma, addr, page)
#endif
/*
 * NV_REMAP_PAGE_RANGE: map a physical range into user space, papering
 * over the remap_pfn_range()/remap_page_range() API generations.
 * NOTE(review): the pfn and 5-argument variants expand to a reference to
 * a variable literally named 'vma', which must exist at every call site
 * (intentional but unhygienic) — keep that in mind when changing callers.
 */
#if defined(NV_REMAP_PFN_RANGE_PRESENT)
#define NV_REMAP_PAGE_RANGE(from, offset, x...) \
    remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x)
#elif defined(NV_REMAP_PAGE_RANGE_PRESENT)
#if (NV_REMAP_PAGE_RANGE_ARGUMENT_COUNT == 5)
#define NV_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x)
#elif (NV_REMAP_PAGE_RANGE_ARGUMENT_COUNT == 4)
#define NV_REMAP_PAGE_RANGE(x...) remap_page_range(x)
#else
#error "NV_REMAP_PAGE_RANGE_ARGUMENT_COUNT value unrecognized!"
#endif
#else
#error "NV_REMAP_PAGE_RANGE() undefined!"
#endif
939 |
|
940 |
/*
 * NV_PGD_OFFSET: look up the page-global-directory entry for 'address'.
 * Uses pgd_offset_k() for kernel addresses, pgd_offset(mm, ...) for user
 * addresses.  GCC statement expression; evaluates to the pgd_t pointer.
 */
#define NV_PGD_OFFSET(address, kernel, mm)          \
   ({                                               \
        struct mm_struct *__mm = (mm);              \
        pgd_t *__pgd;                               \
        if (!kernel)                                \
            __pgd = pgd_offset(__mm, address);      \
        else                                        \
            __pgd = pgd_offset_k(address);          \
        __pgd;                                      \
    })
950 |
|
951 |
/*
 * NV_PGD_PRESENT: validate a pgd pointer.  If the entry is bad or not
 * present, the caller's 'pgd' lvalue is set to NULL; the expression
 * evaluates to non-zero only when the entry is usable.
 */
#define NV_PGD_PRESENT(pgd)                         \
   ({                                               \
        if ((pgd != NULL) &&                        \
            (pgd_bad(*pgd) || pgd_none(*pgd)))      \
            /* static */ pgd = NULL;                \
        pgd != NULL;                                \
    })
958 |
|
959 |
/*
 * NV_PMD_OFFSET/NV_PMD_UNMAP: walk from a pgd to the page-middle-directory
 * entry, hiding three kernel variants:
 *   - highpte-style kernels providing pmd_offset_map()/pmd_unmap()
 *   - 4-level page tables (PUD_SHIFT defined): pgd -> pud -> pmd
 *   - classic 3-level page tables: pmd_offset() directly
 * NV_PMD_UNMAP() must be called on the result when it is no longer
 * needed; it is a no-op except on the pmd_offset_map() kernels.
 */
#if defined(pmd_offset_map)
#define NV_PMD_OFFSET(address, pgd)                 \
   ({                                               \
        pmd_t *__pmd;                               \
        __pmd = pmd_offset_map(pgd, address);       \
    })
#define NV_PMD_UNMAP(pmd) pmd_unmap(pmd);
#else
#if defined(PUD_SHIFT) /* 4-level pgtable */
#define NV_PMD_OFFSET(address, pgd)                 \
   ({                                               \
        pmd_t *__pmd = NULL;                        \
        pud_t *__pud;                               \
        __pud = pud_offset(pgd, address);           \
        if ((__pud != NULL) &&                      \
            !(pud_bad(*__pud) || pud_none(*__pud))) \
            __pmd = pmd_offset(__pud, address);     \
        __pmd;                                      \
    })
#else /* 3-level pgtable */
#define NV_PMD_OFFSET(address, pgd)                 \
   ({                                               \
        pmd_t *__pmd;                               \
        __pmd = pmd_offset(pgd, address);           \
    })
#endif
/* nothing to unmap on these kernels */
#define NV_PMD_UNMAP(pmd)
#endif
987 |
|
988 |
/*
 * NV_PMD_PRESENT: validate a pmd pointer.  If the entry is bad or not
 * present, the mapping is released (NV_PMD_UNMAP) and the caller's 'pmd'
 * lvalue is set to NULL; evaluates to non-zero when the entry is usable.
 */
#define NV_PMD_PRESENT(pmd)                         \
   ({                                               \
        if ((pmd != NULL) &&                        \
            (pmd_bad(*pmd) || pmd_none(*pmd)))      \
        {                                           \
            NV_PMD_UNMAP(pmd);                      \
            pmd = NULL; /* mark invalid */          \
        }                                           \
        pmd != NULL;                                \
    })
998 |
|
999 |
/*
 * NV_PTE_OFFSET/NV_PTE_UNMAP: fetch the page-table entry for 'address'
 * from a pmd, covering three kernel API generations (pte_offset_atomic,
 * pte_offset, pte_offset_map).  Every variant releases the pmd mapping
 * via NV_PMD_UNMAP() before yielding the pte pointer; the caller must
 * later balance with NV_PTE_UNMAP() on the result.
 */
#if defined(pte_offset_atomic)
#define NV_PTE_OFFSET(address, pmd)                 \
   ({                                               \
        pte_t *__pte;                               \
        __pte = pte_offset_atomic(pmd, address);    \
        NV_PMD_UNMAP(pmd); __pte;                   \
    })
#define NV_PTE_UNMAP(pte) pte_kunmap(pte);
#elif defined(pte_offset)
#define NV_PTE_OFFSET(address, pmd)                 \
   ({                                               \
        pte_t *__pte;                               \
        __pte = pte_offset(pmd, address);           \
        NV_PMD_UNMAP(pmd); __pte;                   \
    })
/* no kmap-style mapping to undo with plain pte_offset() */
#define NV_PTE_UNMAP(pte)
#else
#define NV_PTE_OFFSET(address, pmd)                 \
   ({                                               \
        pte_t *__pte;                               \
        __pte = pte_offset_map(pmd, address);       \
        NV_PMD_UNMAP(pmd); __pte;                   \
    })
#define NV_PTE_UNMAP(pte) pte_unmap(pte);
#endif
1024 |
|
1025 |
/*
 * NV_PTE_PRESENT: validate a pte pointer.  If the entry is not present,
 * the mapping is released (NV_PTE_UNMAP) and the caller's 'pte' lvalue
 * is set to NULL; evaluates to non-zero when the entry is usable.
 */
#define NV_PTE_PRESENT(pte)                         \
   ({                                               \
        if ((pte != NULL) && !pte_present(*pte))    \
        {                                           \
            NV_PTE_UNMAP(pte);                      \
            pte = NULL; /* mark invalid */          \
        }                                           \
        pte != NULL;                                \
    })

/*
 * NV_PTE_VALUE: read the raw pte value, then release the pte mapping.
 * The pte pointer must not be dereferenced again afterwards.
 */
#define NV_PTE_VALUE(pte)                           \
   ({                                               \
        unsigned long __pte_value = pte_val(*pte);  \
        NV_PTE_UNMAP(pte);                          \
        __pte_value;                                \
    })
1041 |
|
1042 |
|
1043 |
/* NOTE(review): despite the name, NV_PAGE_ALIGN() returns the number of
 * pages needed to cover 'addr' bytes (round-up divide), NOT an aligned
 * address like the kernel's PAGE_ALIGN(). */
#define NV_PAGE_ALIGN(addr) ( ((addr) + PAGE_SIZE - 1) / PAGE_SIZE)
/* byte offset of 'addr' within its page */
#define NV_MASK_OFFSET(addr) ( (addr) & (PAGE_SIZE - 1) )
1045 |
|
1046 |
/* EFLAGS.IF, the x86 interrupt-enable flag (bit 9) */
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
/* this isn't defined in some older kernel header files */
#define NV_CPU_INTERRUPT_FLAGS_BIT (1<<9)
#else
#error define NV_CPU_INTERRUPT_FLAGS_BIT
#endif

/*
 * NV_IRQL_IS_RAISED: non-zero when interrupts are disabled on the
 * current CPU, i.e. the interrupt flag is clear in the flags register
 * as captured by NV_SAVE_FLAGS().
 */
static inline int NV_IRQL_IS_RAISED(void)
{
    unsigned long int eflags;
    NV_SAVE_FLAGS(eflags);
    return !(eflags & NV_CPU_INTERRUPT_FLAGS_BIT);
}
1059 |
|
1060 |
static inline int nv_calc_order(unsigned int size) |
1061 |
{ |
1062 |
int order = 0; |
1063 |
while ( ((1 << order) * PAGE_SIZE) < (size)) |
1064 |
{ |
1065 |
order++; |
1066 |
} |
1067 |
return order; |
1068 |
} |
1069 |
|
1070 |
/* mark memory UC-, rather than UC (don't use _PAGE_PWT) */
static inline pgprot_t pgprot_noncached_weak(pgprot_t old_prot)
    {
        pgprot_t new_prot = old_prot;
        /* only set PCD on i486 and newer; 386-class CPUs lack the bit */
        if (boot_cpu_data.x86 > 3)
            new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);
        return new_prot;
    }

/* fallback for kernels that don't provide pgprot_noncached(): fully
 * uncached mapping (PCD | PWT) */
#if !defined (pgprot_noncached)
static inline pgprot_t pgprot_noncached(pgprot_t old_prot)
    {
        pgprot_t new_prot = old_prot;
        if (boot_cpu_data.x86 > 3)
            new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD | _PAGE_PWT);
        return new_prot;
    }
#endif
/* Linux 2.4/x86 without pfn_to_page(): flat (non-discontig) mem_map lookup */
#if defined(KERNEL_2_4) && defined(NVCPU_X86) && !defined(pfn_to_page)
#define pfn_to_page(pfn) (mem_map + (pfn))
#endif
1091 |
|
1092 |
/*
 * An allocated bit of memory using NV_MEMORY_ALLOCATION_OFFSET
 * looks like this in the driver
 */

/* per-page bookkeeping entry for a driver allocation (one per page in
 * nv_alloc_t::page_table) */
typedef struct nv_pte_t {
    unsigned long phys_addr;        /* physical address */
    unsigned long virt_addr;        /* kernel virtual address */
    dma_addr_t dma_addr;            /* bus/DMA address */
#ifdef NV_SG_MAP_BUFFERS
    struct scatterlist sg_list;     /* scatter/gather entry for this page */
#endif
#if defined(NV_SWIOTLB)
    /* original addresses kept when swiotlb bounce buffering substitutes
     * different pages */
    unsigned long orig_phys_addr;
    unsigned long orig_virt_addr;
#endif
    unsigned int page_count;        /* per-page count — semantics defined by callers */
} nv_pte_t;
1110 |
|
1111 |
/* descriptor for one driver memory allocation; chained via 'next' into
 * the per-device alloc_queue (see nv_linux_state_t) */
typedef struct nv_alloc_s {
    struct nv_alloc_s *next;
    atomic_t       usage_count;
    unsigned int   flags;       /* NV_ALLOC_TYPE_* bits plus encoded mapping type */
    unsigned int   num_pages;
    unsigned int   order;
    unsigned int   size;
    nv_pte_t     **page_table;          /* list of physical pages allocated */
    void          *key_mapping;         /* mapping used as a key for finding this nv_alloc_t */
    void          *file;
    unsigned int   pid;
    void          *priv_data;
    nv_state_t    *nv;
} nv_alloc_t;
1125 |
|
1126 |
|
1127 |
/* allocation type bits (low bits of nv_alloc_t::flags) */
#define NV_ALLOC_TYPE_PCI       (1<<0)
#define NV_ALLOC_TYPE_AGP       (1<<1)
#define NV_ALLOC_TYPE_CONTIG    (1<<2)

/* the memory mapping/caching type is stored in bits 16..23 of 'flags';
 * ENC packs it in, NV_ALLOC_MAPPING extracts it */
#define NV_ALLOC_MAPPING_SHIFT      16
#define NV_ALLOC_MAPPING(flags)     (((flags)>>NV_ALLOC_MAPPING_SHIFT)&0xff)
#define NV_ALLOC_ENC_MAPPING(flags) ((flags)<<NV_ALLOC_MAPPING_SHIFT)

#define NV_ALLOC_MAPPING_CACHED(flags) (NV_ALLOC_MAPPING(flags) == NV_MEMORY_CACHED)

#define NV_ALLOC_MAPPING_AGP(flags)    ((flags) & NV_ALLOC_TYPE_AGP)
#define NV_ALLOC_MAPPING_CONTIG(flags) ((flags) & NV_ALLOC_TYPE_CONTIG)
1139 |
|
1140 |
static inline U032 nv_alloc_init_flags(int cached, int agp, int contig) |
1141 |
{ |
1142 |
U032 flags = NV_ALLOC_ENC_MAPPING(cached); |
1143 |
if (agp) flags |= NV_ALLOC_TYPE_AGP; |
1144 |
else flags |= NV_ALLOC_TYPE_PCI; |
1145 |
if (contig && !agp) flags |= NV_ALLOC_TYPE_CONTIG; |
1146 |
return flags; |
1147 |
} |
1148 |
|
1149 |
/* linux-specific version of old nv_state_t */
/* this is a general os-specific state structure. the first element *must* be
   the general state structure, for the generic unix-based code */
typedef struct {
    nv_state_t nv_state;    /* must remain the first member (see above) */
    atomic_t usage_count;

    struct pci_dev *dev;
    void *agp_bridge;
    nv_alloc_t *alloc_queue;    /* linked list of this device's allocations */

    /* keep track of any pending bottom halves */
    struct tasklet_struct tasklet;

    /* get a timer callback every second */
    struct timer_list rc_timer;

    /* per-device locking mechanism for access to core rm */
    nv_spinlock_t rm_lock;
    int rm_lock_cpu;
    int rm_lock_count;

    /* lock for linux-specific data, not used by core rm */
    struct semaphore ldata_lock;

    /* lock for linux-specific alloc queue */
    struct semaphore at_lock;
} nv_linux_state_t;
1177 |
|
1178 |
/*
 * file-private data
 * hide a pointer to our data structures in a file-private ptr
 * there are times we need to grab this data back from the file
 * data structure..
 */

/* singly-linked queue node carrying one nv_event_t */
typedef struct nvidia_event
{
    struct nvidia_event *next;
    nv_event_t event;
} nvidia_event_t;

/* per-open-file state, stored in the file's private_data pointer
 * (see FILE_PRIVATE/NV_GET_NVFP below) */
typedef struct
{
    void *nvptr;                    /* back-pointer to the owning device state */
    nvidia_event_t *event_head;     /* head/tail of the pending-event FIFO */
    nvidia_event_t *event_tail;
    nv_spinlock_t fp_lock;          /* lock for this file-private data */
    wait_queue_head_t waitqueue;    /* where waiters sleep for events */
} nv_file_private_t;
1199 |
|
1200 |
/* accessors for the per-open-file data hidden in the struct file */
#define FILE_PRIVATE(filep)     ((filep)->private_data)

#define NV_GET_NVFP(filep)      ((nv_file_private_t *) FILE_PRIVATE(filep))

/* for the card devices */
#define NVL_FROM_FILEP(filep)   (NV_GET_NVFP(filep)->nvptr)

/* nv_state_t <-> nv_linux_state_t conversions; relies on nv_state being
 * the first member of nv_linux_state_t */
#define NV_GET_NVL_FROM_NV_STATE(nv) \
    ((nv_linux_state_t *) nv->os_state)

#define NV_STATE_PTR(nvl)   (&((nvl)->nv_state))

/* atomic_t wrappers: callers pass the atomic_t object, not its address */
#define NV_ATOMIC_SET(data,val)         atomic_set(&(data), (val))
#define NV_ATOMIC_INC(data)             atomic_inc(&(data))
#define NV_ATOMIC_DEC(data)             atomic_dec(&(data))
#define NV_ATOMIC_DEC_AND_TEST(data)    atomic_dec_and_test(&(data))
#define NV_ATOMIC_READ(data)            atomic_read(&(data))

/* defined elsewhere in the driver; presumably signals that memory type
 * settings need updating — confirm at the definition site */
extern int nv_update_memory_types;
1220 |
|
1221 |
/* |
1222 |
* Using change_page_attr() on early Linux/x86-64 2.6 kernels may |
1223 |
* result in a BUG() being triggered. The underlying problem |
1224 |
 * actually exists on multiple architectures and kernels, but only
 * the kernels identified above check for the condition and trigger
 * a BUG().
1226 |
* |
1227 |
* Note that this is a due to a bug in the Linux kernel, not an |
1228 |
* NVIDIA driver bug (it can also be triggered by AGPGART). |
1229 |
* |
1230 |
* We therefore need to determine at runtime if change_page_attr() |
1231 |
* can be used safely on these kernels. |
1232 |
*/ |
1233 |
/* affected configuration (see comment above): x86-64, Linux 2.6 kernels
 * before 2.6.11 that provide change_page_attr() */
#if defined(NV_CHANGE_PAGE_ATTR_PRESENT) && defined(NVCPU_X86_64) && \
  !defined(KERNEL_2_4) && \
  (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11))
#define NV_CHANGE_PAGE_ATTR_BUG_PRESENT
#endif
1238 |
|
1239 |
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
/*
 * On Linux/x86-64 (and recent Linux/x86) kernels, the PAGE_KERNEL
 * and PAGE_KERNEL_NOCACHE protection bit masks include _PAGE_NX
 * to indicate that the no-execute protection page feature is used
 * for the page in question.
 *
 * We need to be careful to mask out _PAGE_NX when the host system
 * doesn't support this feature or when it's disabled: the kernel
 * may not do this in its implementation of the change_page_attr()
 * interface.
 */
/* CPUID feature bit for NX: capability word 1, bit 20 */
#ifndef X86_FEATURE_NX
#define X86_FEATURE_NX (1*32+20)
#endif
#ifndef boot_cpu_has
#define boot_cpu_has(x) test_bit(x, boot_cpu_data.x86_capability)
#endif
/* IA32_EFER model-specific register; bit 11 (NXE) enables no-execute */
#ifndef MSR_EFER
#define MSR_EFER 0xc0000080
#endif
#ifndef EFER_NX
#define EFER_NX (1 << 11)
#endif
/* PTE bit 63: no-execute */
#ifndef _PAGE_NX
#define _PAGE_NX ((NvU64)1 << 63)
#endif
/* mask of PTE bits usable on this system (defined in the driver's OS
 * layer); used to strip _PAGE_NX when NX is unsupported or disabled */
extern NvU64 __nv_supported_pte_mask;
#endif
1268 |
|
1269 |
/*
 * NV_ASSERT: in DEBUG builds, print 'message' and break into the
 * debugger when 'condition' is false.  In release builds it expands to
 * an empty statement; note that 'condition' is then NOT evaluated, so
 * it must not contain required side effects.
 */
#if defined(DEBUG)
#define NV_ASSERT(message, condition)                                   \
    do                                                                  \
    {                                                                   \
        if (!(condition))                                               \
        {                                                               \
            nv_printf(NV_DBG_ERRORS, "NVRM: ASSERT: %s\n", message);    \
            os_dbg_breakpoint();                                        \
        }                                                               \
    }                                                                   \
    while (0)
#else
/* expand to a proper empty statement (not nothing) so "NV_ASSERT(...);"
 * behaves identically in every statement context in both builds */
#define NV_ASSERT(message, condition) do { } while (0)
#endif /* DEBUG */
1283 |
|
1284 |
int nv_verify_page_mappings(nv_pte_t *, unsigned int); |
1285 |
|
1286 |
#endif /* _NV_LINUX_H_ */ |