/* _NVRM_COPYRIGHT_BEGIN_
 *
 * Copyright 1999-2001 by NVIDIA Corporation. All rights reserved. All
 * information contained herein is proprietary and confidential to NVIDIA
 * Corporation. Any use, reproduction, or disclosure without the written
 * permission of NVIDIA Corporation is prohibited.
 *
 * _NVRM_COPYRIGHT_END_
 */

#include "nv-misc.h"
#include "os-interface.h"
#include "nv-linux.h"
#include "nv_compiler.h"
#include "os-agp.h"
#include "nv-vm.h"

#if defined(MODULE_LICENSE)
MODULE_LICENSE("NVIDIA");
#endif
#if defined(MODULE_INFO)
MODULE_INFO(supported, "external");
#endif

#ifdef MODULE_ALIAS_CHARDEV_MAJOR
MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
#endif

#if defined(KERNEL_2_4) && (defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE))
extern int i2c_add_adapter(struct i2c_adapter *) __attribute__((weak));
extern int i2c_del_adapter(struct i2c_adapter *) __attribute__((weak));
#endif

#include "patches.h"

/*
 * our global state; one per device
 */

static U032 num_nv_devices = 0;
static U032 num_probed_nv_devices = 0;

nv_linux_state_t nv_linux_devices[NV_MAX_DEVICES];
EXPORT_SYMBOL(nv_linux_devices);

#if defined(NV_PM_SUPPORT_OLD_STYLE_APM)
static struct pm_dev *apm_nv_dev[NV_MAX_DEVICES] = { 0 };
#endif

int nv_pat_mode = NV_PAT_MODE_DISABLED;

#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
NvU64 __nv_supported_pte_mask = ~_PAGE_NX;
#endif

/*
 * And one for the control device
 */

nv_linux_state_t nv_ctl_device = { { 0 } };
wait_queue_head_t nv_ctl_waitqueue;

#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_nvidia;
struct proc_dir_entry *proc_nvidia_warnings;
struct proc_dir_entry *proc_nvidia_patches;
#endif

static const char *__README_warning = \
    "The NVIDIA graphics driver tries to detect potential problems\n"
    "with the host system and warns about them using the system's\n"
    "logging mechanisms. Important warning messages are also logged\n"
    "to dedicated text files in this directory.\n";

static const char *__README_patches = \
    "The NVIDIA graphics driver's kernel interface files can be\n"
    "patched to improve compatibility with new Linux kernels or to\n"
    "fix bugs in these files. When applied, each official patch\n"
    "provides a short text file describing itself in this\n"
    "directory.\n";

#if defined(NV_SWIOTLB)
static const char *__swiotlb_warning = \
    "You are probably using the kernel's SWIOTLB interface.\n\n"
    "Be very careful with this interface, as it is easy to exhaust\n"
    "its memory buffer, at which point it may panic the kernel.\n"
    "Please increase the default size of this buffer by specifying\n"
    "a larger buffer size with the \"swiotlb\" kernel parameter,\n"
    "e.g.: \"swiotlb=16384\".\n";
#endif

#if defined(NV_CHANGE_PAGE_ATTR_BUG_PRESENT)
static const char *__cpgattr_warning = \
    "Your Linux kernel has known problems in its implementation of\n"
    "the change_page_attr() kernel interface.\n\n"
    "The NVIDIA graphics driver will attempt to work around these\n"
    "problems, but system stability may be adversely affected.\n"
    "It is recommended that you update to Linux 2.6.11 (or a newer\n"
    "Linux kernel release).\n";

static const char *__cpgattr_warning_2 = \
    "Your Linux kernel's version and architecture indicate that it\n"
    "may have an implementation of the change_page_attr() kernel\n"
    "interface known to have problems. The NVIDIA graphics\n"
    "driver made an attempt to determine whether your kernel is\n"
    "affected, but could not. It will assume the interface does not\n"
    "work correctly and attempt to employ workarounds.\n"
    "This may adversely affect system stability.\n"
    "It is recommended that you update to Linux 2.6.11 (or a newer\n"
    "Linux kernel release).\n";
#endif

static const char *__mmconfig_warning = \
    "Your current system configuration has known problems when\n"
    "accessing PCI Configuration Space that can lead to accesses\n"
    "to the PCI Configuration Space of the wrong PCI device. This\n"
    "is known to cause instabilities with the NVIDIA graphics driver.\n\n"
    "Please see the MMConfig section in the README for more information\n"
    "on how to work around this problem.\n";

#if defined(NV_SG_MAP_BUFFERS)
int nv_swiotlb = 0;
#if defined(NV_NEED_REMAP_CHECK)
unsigned int nv_remap_count;
unsigned int nv_remap_limit;
#endif
#endif

int nv_update_memory_types = 1;
static int nv_mmconfig_failure_detected = 0;

static void *nv_pte_t_cache = NULL;

// allow an easy way to switch all event-related debug printfs
// between 'info' and 'errors'
#if defined(NV_DBG_EVENTS)
#define NV_DBG_EVENTINFO NV_DBG_ERRORS
#else
#define NV_DBG_EVENTINFO NV_DBG_INFO
#endif

/***
 *** STATIC functions, only in this file
 ***/

/* nvos_ functions: do not take a per-device state parameter */
static int nvos_post_vbios(void *, int);
static void nvos_proc_create(void);
static void nvos_proc_add_text_file(struct proc_dir_entry *,
                                    const char *, const char *);
static void nvos_proc_remove_all(struct proc_dir_entry *);
static void nvos_proc_remove(void);
static int nvos_count_devices(void);

static nv_alloc_t *nvos_create_alloc(struct pci_dev *, int);
static int nvos_free_alloc(nv_alloc_t *);

/* nvl_ functions: take a Linux per-device state pointer */
static nv_alloc_t *nvl_find_alloc(nv_linux_state_t *, unsigned long, unsigned long);
static int nvl_add_alloc(nv_linux_state_t *, nv_alloc_t *);
static int nvl_remove_alloc(nv_linux_state_t *, nv_alloc_t *);

/* lock-related functions that should only be called from this file */
static void nv_lock_init_locks(nv_state_t *nv);

/***
 *** EXPORTS to Linux Kernel
 ***/

/* nv_kern_* functions, driver interfaces called by the Linux kernel */
static void nv_kern_vma_open(struct vm_area_struct *);
static void nv_kern_vma_release(struct vm_area_struct *);

int nv_kern_open(struct inode *, struct file *);
int nv_kern_close(struct inode *, struct file *);
int nv_kern_mmap(struct file *, struct vm_area_struct *);
unsigned int nv_kern_poll(struct file *, poll_table *);
int nv_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
long nv_kern_unlocked_ioctl(struct file *, unsigned int, unsigned long);
long nv_kern_compat_ioctl(struct file *, unsigned int, unsigned long);
void nv_kern_isr_bh(unsigned long);
#if !defined(NV_IRQ_HANDLER_T_PRESENT) || (NV_IRQ_HANDLER_T_ARGUMENT_COUNT == 3)
irqreturn_t nv_kern_isr(int, void *, struct pt_regs *);
#else
irqreturn_t nv_kern_isr(int, void *);
#endif
void nv_kern_rc_timer(unsigned long);
#if defined(NV_PM_SUPPORT_OLD_STYLE_APM)
static int nv_kern_apm_event(struct pm_dev *, pm_request_t, void *);
#endif

static int nv_kern_read_cardinfo(char *, char **, off_t off, int, int *, void *);
static int nv_kern_read_status(char *, char **, off_t off, int, int *, void *);
static int nv_kern_read_registry(char *, char **, off_t off, int, int *, void *);
static int nv_kern_read_agpinfo(char *, char **, off_t off, int, int *, void *);
static int nv_kern_read_version(char *, char **, off_t off, int, int *, void *);
static int nv_kern_read_text_file(char *, char **, off_t off, int, int *, void *);

int nv_kern_ctl_open(struct inode *, struct file *);
int nv_kern_ctl_close(struct inode *, struct file *);
unsigned int nv_kern_ctl_poll(struct file *, poll_table *);

int nv_kern_probe(struct pci_dev *, const struct pci_device_id *);

#if defined(NV_PM_SUPPORT_DEVICE_DRIVER_MODEL)
static int nv_kern_suspend(struct pci_dev *, pm_message_t);
static int nv_kern_resume(struct pci_dev *);
#endif

/***
 *** see nv.h for functions exported to other parts of resman
 ***/

static struct pci_device_id nv_pci_table[] = {
    {
        .vendor     = PCI_VENDOR_ID_NVIDIA,
        .device     = PCI_ANY_ID,
        .subvendor  = PCI_ANY_ID,
        .subdevice  = PCI_ANY_ID,
        .class      = (PCI_CLASS_DISPLAY_VGA << 8),
        .class_mask = ~0
    },
    {
        .vendor     = PCI_VENDOR_ID_NVIDIA,
        .device     = PCI_ANY_ID,
        .subvendor  = PCI_ANY_ID,
        .subdevice  = PCI_ANY_ID,
        .class      = (PCI_CLASS_DISPLAY_3D << 8),
        .class_mask = ~0
    },
    { }
};

MODULE_DEVICE_TABLE(pci, nv_pci_table);

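/*
 * Note: the 'class' field of struct pci_device_id holds the full 24-bit
 * PCI class code (base class, subclass, prog-if), while the
 * PCI_CLASS_DISPLAY_* constants are 16-bit (base class, subclass)
 * values; they are shifted left by 8 above to align with the prog-if
 * byte, and with a class_mask of ~0 only devices whose prog-if is 0
 * will match.
 */
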
static struct pci_driver nv_pci_driver = {
    .name     = "nvidia",
    .id_table = nv_pci_table,
    .probe    = nv_kern_probe,
#if defined(NV_PM_SUPPORT_DEVICE_DRIVER_MODEL)
    .suspend  = nv_kern_suspend,
    .resume   = nv_kern_resume,
#endif
};

/* character driver entry points */

static struct file_operations nv_fops = {
    .owner   = THIS_MODULE,
    .poll    = nv_kern_poll,
#if defined(NV_FILE_OPERATIONS_HAS_IOCTL)
    .ioctl   = nv_kern_ioctl,
#endif
#if defined(NV_FILE_OPERATIONS_HAS_UNLOCKED_IOCTL)
    .unlocked_ioctl = nv_kern_unlocked_ioctl,
#endif
#if defined(NVCPU_X86_64) && defined(NV_FILE_OPERATIONS_HAS_COMPAT_IOCTL)
    .compat_ioctl = nv_kern_compat_ioctl,
#endif
    .mmap    = nv_kern_mmap,
    .open    = nv_kern_open,
    .release = nv_kern_close,
};

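/*
 * Note: depending on kernel version, one of three ioctl entry points is
 * wired up above: .ioctl (invoked with the Big Kernel Lock held),
 * .unlocked_ioctl (no BKL), and .compat_ioctl (for 32-bit processes
 * running on x86-64 kernels).
 */
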
// Our reserved major device number.
int nv_major = NV_MAJOR_DEVICE_NUMBER;

// pull in the pointer to the NVID stamp from the binary module
extern const char *pNVRM_ID;

#if defined(VM_CHECKER)
/* kernel virtual memory usage/allocation information */
U032 vm_usage = 0;
struct mem_track_t *vm_list = NULL;
nv_spinlock_t vm_lock;
#endif

#if defined(KM_CHECKER)
/* kernel logical memory usage/allocation information */
U032 km_usage = 0;
struct mem_track_t *km_list = NULL;
nv_spinlock_t km_lock;
#endif

/***
 *** STATIC functions
 ***/

/* specify that this card needs its VBIOS posted */
static int nvos_post_vbios(void *args, int size)
{
    nv_ioctl_post_vbios_t *info;
    U032 i;

    if (size != sizeof(nv_ioctl_post_vbios_t))
        return -EINVAL;

    info = args;
    for (i = 0; i < num_nv_devices; i++)
    {
        nv_state_t *nv = NV_STATE_PTR(&nv_linux_devices[i]);
        if (nv->bus == info->bus && nv->slot == info->slot)
        {
            // we assume any device was already posted and rely on
            // X to tell us which cards need posting. But if we've
            // already manually posted a card, it doesn't need to
            // be posted again.
            if (!(nv->flags & NV_FLAG_WAS_POSTED || nv->flags & NV_FLAG_OPEN))
            {
                nv->flags |= NV_FLAG_NEEDS_POSTING;
            }
        }
    }

    return 0;
}

static
nv_alloc_t *nvos_create_alloc(
    struct pci_dev *dev,
    int num_pages
)
{
    nv_alloc_t *at;
    unsigned int pt_size, i;

    NV_KMALLOC(at, sizeof(nv_alloc_t));
    if (at == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc info\n");
        return NULL;
    }

    memset(at, 0, sizeof(nv_alloc_t));

    pt_size = num_pages * sizeof(nv_pte_t *);
    if (os_alloc_mem((void **)&at->page_table, pt_size) != RM_OK)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n");
        NV_KFREE(at, sizeof(nv_alloc_t));
        return NULL;
    }

    memset(at->page_table, 0, pt_size);
    at->num_pages = num_pages;
    NV_ATOMIC_SET(at->usage_count, 0);

    for (i = 0; i < at->num_pages; i++)
    {
        NV_KMEM_CACHE_ALLOC(at->page_table[i], nv_pte_t_cache, nv_pte_t);
        if (at->page_table[i] == NULL)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: failed to allocate page table entry\n");
            nvos_free_alloc(at);
            return NULL;
        }
        memset(at->page_table[i], 0, sizeof(nv_pte_t));
    }

    at->pid = os_get_current_process();

    return at;
}

static
int nvos_free_alloc(
    nv_alloc_t *at
)
{
    unsigned int i;

    if (at == NULL)
        return -1;

    if (NV_ATOMIC_READ(at->usage_count))
        return 1;

    for (i = 0; i < at->num_pages; i++)
    {
        if (at->page_table[i] != NULL)
            NV_KMEM_CACHE_FREE(at->page_table[i], nv_pte_t, nv_pte_t_cache);
    }
    os_free_mem(at->page_table);

    NV_KFREE(at, sizeof(nv_alloc_t));

    return 0;
}

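/*
 * The two lookup routines below walk the PCI capability list: if the
 * PCI_STATUS_CAP_LIST status bit is set, PCI_CAPABILITY_LIST holds the
 * offset of the first capability structure, and each structure starts
 * with an 8-bit capability ID followed by the offset of the next entry.
 * Pointers are masked with 0xfc because their bottom two bits are
 * reserved.
 */
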
static u8 nvos_find_agp_capability(struct pci_dev *dev)
{
    u16 status;
    u8 cap_ptr, cap_id;

    pci_read_config_word(dev, PCI_STATUS, &status);
    status &= PCI_STATUS_CAP_LIST;
    if (!status)
        return 0;

    switch (dev->hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
            pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
            break;
        default:
            return 0;
    }

    do {
        cap_ptr &= 0xfc;
        pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id);
        if (cap_id == PCI_CAP_ID_AGP)
            return cap_ptr;
        pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr);
    } while (cap_ptr && cap_id != 0xff);

    return 0;
}

static u8 nvos_find_pci_express_capability(struct pci_dev *dev)
{
    u16 status;
    u8 cap_ptr, cap_id;

    pci_read_config_word(dev, PCI_STATUS, &status);
    status &= PCI_STATUS_CAP_LIST;
    if (!status)
        return 0;

    switch (dev->hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
            pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
            break;
        default:
            return 0;
    }

    do {
        cap_ptr &= 0xfc;
        pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id);
        if (cap_id == PCI_CAP_ID_EXP)
            return cap_ptr;
        pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr);
    } while (cap_ptr && cap_id != 0xff);

    return 0;
}

static struct pci_dev* nvos_get_agp_device_by_class(unsigned int class)
{
    struct pci_dev *dev, *fdev;
    u32 slot, func, fn;

    dev = NV_PCI_GET_CLASS(class << 8, NULL);
    while (dev) {
        slot = NV_PCI_SLOT_NUMBER(dev);
        for (func = 0; func < 8; func++) {
            fn = PCI_DEVFN(slot, func);
            fdev = NV_PCI_GET_SLOT(NV_PCI_BUS_NUMBER(dev), fn);
            if (!fdev)
                continue;
            if (nvos_find_agp_capability(fdev)) {
                NV_PCI_DEV_PUT(dev);
                return fdev;
            }
            NV_PCI_DEV_PUT(fdev);
        }
        dev = NV_PCI_GET_CLASS(class << 8, dev);
    }

    return NULL;
}

static struct pci_dev* nv_get_pci_device(nv_state_t *nv)
{
    struct pci_dev *dev;

    dev = NV_PCI_GET_DEVICE(nv->vendor_id, nv->device_id, NULL);
    while (dev) {
        if (NV_PCI_SLOT_NUMBER(dev) == nv->slot
                && NV_PCI_BUS_NUMBER(dev) == nv->bus)
            return dev;
        dev = NV_PCI_GET_DEVICE(nv->vendor_id, nv->device_id, dev);
    }

    return NULL;
}

static void nvos_proc_create(void)
{
#ifdef CONFIG_PROC_FS
    struct pci_dev *dev;
    U032 j, i = 0;
    char name[6];

    struct proc_dir_entry *entry;
    struct proc_dir_entry *proc_nvidia_agp, *proc_nvidia_cards;

    /* world readable directory */
    int d_flags = S_IFDIR | S_IRUGO | S_IXUGO;

    /* world readable file */
    int flags = S_IFREG | S_IRUGO;

    nv_state_t *nv;
    nv_linux_state_t *nvl;

    proc_nvidia = create_proc_entry("driver/nvidia", d_flags, NULL);
    if (!proc_nvidia)
        goto failed;

    proc_nvidia_cards = create_proc_entry("cards", d_flags, proc_nvidia);
    if (!proc_nvidia_cards)
        goto failed;

    proc_nvidia_warnings = create_proc_entry("warnings", d_flags, proc_nvidia);
    if (!proc_nvidia_warnings)
        goto failed;

    proc_nvidia_patches = create_proc_entry("patches", d_flags, proc_nvidia);
    if (!proc_nvidia_patches)
        goto failed;

    /*
     * Set the module owner to ensure that the reference
     * count reflects accesses to the proc files.
     */
#if defined(NV_PROC_DIR_ENTRY_HAS_OWNER)
    proc_nvidia->owner = THIS_MODULE;
    proc_nvidia_cards->owner = THIS_MODULE;
    proc_nvidia_warnings->owner = THIS_MODULE;
    proc_nvidia_patches->owner = THIS_MODULE;
#endif

    for (j = 0; j < num_nv_devices; j++)
    {
        nvl = &nv_linux_devices[j];
        nv = NV_STATE_PTR(nvl);

        dev = nv_get_pci_device(nv);
        if (!dev)
            break;

        sprintf(name, "%d", i++);
        entry = create_proc_entry(name, flags, proc_nvidia_cards);
        if (!entry) {
            NV_PCI_DEV_PUT(dev);
            goto failed;
        }

        entry->data = nv;
        entry->read_proc = nv_kern_read_cardinfo;
#if defined(NV_PROC_DIR_ENTRY_HAS_OWNER)
        entry->owner = THIS_MODULE;
#endif

        if (nvos_find_agp_capability(dev)) {
            /*
             * Create the /proc/driver/nvidia/agp/{status,host-bridge,card}
             * entries now that we know there's AGP hardware.
             */
            entry = create_proc_entry("agp", d_flags, proc_nvidia);
            if (!entry) {
                NV_PCI_DEV_PUT(dev);
                goto failed;
            }

#if defined(NV_PROC_DIR_ENTRY_HAS_OWNER)
            entry->owner = THIS_MODULE;
#endif
            proc_nvidia_agp = entry;

            entry = create_proc_entry("status", flags, proc_nvidia_agp);
            if (!entry) {
                NV_PCI_DEV_PUT(dev);
                goto failed;
            }

            entry->data = nv;
            entry->read_proc = nv_kern_read_status;
#if defined(NV_PROC_DIR_ENTRY_HAS_OWNER)
            entry->owner = THIS_MODULE;
#endif

            entry = create_proc_entry("host-bridge", flags, proc_nvidia_agp);
            if (!entry) {
                NV_PCI_DEV_PUT(dev);
                goto failed;
            }

            entry->data = NULL;
            entry->read_proc = nv_kern_read_agpinfo;
#if defined(NV_PROC_DIR_ENTRY_HAS_OWNER)
            entry->owner = THIS_MODULE;
#endif

            entry = create_proc_entry("card", flags, proc_nvidia_agp);
            if (!entry) {
                NV_PCI_DEV_PUT(dev);
                goto failed;
            }

            entry->data = nv;
            entry->read_proc = nv_kern_read_agpinfo;
#if defined(NV_PROC_DIR_ENTRY_HAS_OWNER)
            entry->owner = THIS_MODULE;
#endif
        }

        NV_PCI_DEV_PUT(dev);
    }

    entry = create_proc_entry("version", flags, proc_nvidia);
    if (!entry)
        goto failed;

    entry->read_proc = nv_kern_read_version;
#if defined(NV_PROC_DIR_ENTRY_HAS_OWNER)
    entry->owner = THIS_MODULE;
#endif

    entry = create_proc_entry("registry", flags, proc_nvidia);
    if (!entry)
        goto failed;

    entry->read_proc = nv_kern_read_registry;
#if defined(NV_PROC_DIR_ENTRY_HAS_OWNER)
    entry->owner = THIS_MODULE;
#endif

    return;

failed:
    nv_printf(NV_DBG_ERRORS, "NVRM: failed to create /proc entries!\n");
    nvos_proc_remove_all(proc_nvidia);
#endif
}

static void
nvos_proc_add_text_file(
    struct proc_dir_entry *parent,
    const char *filename,
    const char *text
)
{
#ifdef CONFIG_PROC_FS
    struct proc_dir_entry *entry;

    /* world readable file */
    int flags = S_IFREG | S_IRUGO;

    entry = create_proc_entry(filename, flags, parent);
    if (!entry) return;

    entry->data = (void *)text;
    entry->read_proc = nv_kern_read_text_file;
#if defined(NV_PROC_DIR_ENTRY_HAS_OWNER)
    entry->owner = THIS_MODULE;
#endif
#endif
}

#ifdef CONFIG_PROC_FS
static void nvos_proc_remove_all(struct proc_dir_entry *entry)
{
    while (entry) {
        struct proc_dir_entry *next = entry->next;
        if (entry->subdir)
            nvos_proc_remove_all(entry->subdir);
        remove_proc_entry(entry->name, entry->parent);
        if (entry == proc_nvidia)
            break;
        entry = next;
    }
}
#endif

static void nvos_proc_remove(void)
{
#ifdef CONFIG_PROC_FS
    nvos_proc_remove_all(proc_nvidia);
#endif
}

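/*
 * Note: this teardown walks the proc_dir_entry tree directly (subdir
 * for children, next for siblings), which is only possible on older
 * kernels where struct proc_dir_entry's internals are public; each
 * entry is removed by name via remove_proc_entry() relative to its
 * parent.
 */
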
/*
 * Given a physical address (within the AGP aperture, plain physical
 * or 32-bit DMA'able within the IOMMU), find the 'at' that owns the
 * address and return it to the caller.
 */
static nv_alloc_t *nvl_find_alloc(
    nv_linux_state_t *nvl,
    unsigned long address,
    unsigned long flags
)
{
    nv_alloc_t *at;

    for (at = nvl->alloc_queue; at; at = at->next)
    {
        unsigned int i;

        // make sure this 'at' matches the flags the caller provided,
        // i.e. don't mistake a PCI allocation for an AGP allocation
        if (!(at->flags & flags))
            continue;

        // most mappings will be found based on the 'key'
        if (address == ((unsigned long) at->key_mapping))
            return at;

        // if AGP, allow the address to fall within this range
        if (NV_ALLOC_MAPPING_AGP(at->flags) &&
            (address >= (unsigned long) at->key_mapping) &&
            (address + PAGE_SIZE <= (unsigned long) at->key_mapping + at->num_pages * PAGE_SIZE))
            return at;

        for (i = 0; i < at->num_pages; i++)
        {
            nv_pte_t *page_ptr = at->page_table[i];

            if ((address >= page_ptr->phys_addr) &&
                ((address - page_ptr->phys_addr) < PAGE_SIZE))
                return at;
            if ((address >= page_ptr->dma_addr) &&
                ((address - page_ptr->dma_addr) < PAGE_SIZE))
                return at;
        }
    }

    /* failure is not necessarily an error if the caller
       was just probing an address */
    nv_printf(NV_DBG_INFO, "NVRM: could not find map for vm 0x%lx\n", address);
    return NULL;
}

static int nvl_add_alloc(
    nv_linux_state_t *nvl,
    nv_alloc_t *at
)
{
    down(&nvl->at_lock);
    at->next = nvl->alloc_queue;
    nvl->alloc_queue = at;
    up(&nvl->at_lock);
    return 0;
}

static int nvl_remove_alloc(
    nv_linux_state_t *nvl,
    nv_alloc_t *at
)
{
    nv_alloc_t *tmp, *prev;

    if (nvl->alloc_queue == at)
    {
        nvl->alloc_queue = nvl->alloc_queue->next;
        return 0;
    }

    for (tmp = prev = nvl->alloc_queue; tmp; prev = tmp, tmp = tmp->next)
    {
        if (tmp == at)
        {
            prev->next = tmp->next;
            return 0;
        }
    }

    return -1;
}

static int __nv_enable_pat_support(void);
static void __nv_disable_pat_support(void);

#if defined(NV_ENABLE_PAT_SUPPORT)
/*
 * Private PAT support for use by the NVIDIA driver. This is used on
 * kernels that do not modify the PAT to include a write-combining
 * entry.
 */
static int __determine_pat_mode(void);
static void __nv_setup_pat_entries(void *);
static void __nv_restore_pat_entries(void *);

#define NV_READ_PAT_ENTRIES(pat1, pat2)   rdmsr(0x277, (pat1), (pat2))
#define NV_WRITE_PAT_ENTRIES(pat1, pat2)  wrmsr(0x277, (pat1), (pat2))
#define NV_PAT_ENTRY(pat, index) \
    (((pat) & (0xff << ((index)*8))) >> ((index)*8))

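/*
 * Note: MSR 0x277 is IA32_PAT. It holds eight 8-bit page attribute
 * entries; rdmsr/wrmsr expose them as two 32-bit halves (entries 0-3 in
 * the low half, 4-7 in the high half). The encoding 0x01 selects
 * Write-Combining, which is what __determine_pat_mode() looks for.
 */
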
static inline void __nv_disable_caches(unsigned long *cr4)
{
    unsigned long cr0 = read_cr0();
    write_cr0(((cr0 & (0xdfffffff)) | 0x40000000));
    wbinvd();
    *cr4 = read_cr4();
    if (*cr4 & 0x80) write_cr4(*cr4 & ~0x80);
    __flush_tlb();
}

static inline void __nv_enable_caches(unsigned long cr4)
{
    unsigned long cr0 = read_cr0();
    wbinvd();
    __flush_tlb();
    write_cr0((cr0 & 0x9fffffff));
    if (cr4 & 0x80) write_cr4(cr4);
}

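/*
 * Note on the magic numbers above: CR0 bit 30 (0x40000000) is CD (cache
 * disable) and bit 29 (0x20000000) is NW (not write-through); CR4 bit 7
 * (0x80) is PGE (global pages). This is the sequence the Intel SDM
 * prescribes for PAT/MTRR updates: disable caches, flush them with
 * wbinvd(), temporarily drop global pages so the TLB flush is complete,
 * update the MSR, then restore the previous state.
 */
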
static int __determine_pat_mode()
{
    unsigned int pat1, pat2, i;
    U008 PAT_WC_index;

    if (!test_bit(X86_FEATURE_PAT,
            (volatile unsigned long *)&boot_cpu_data.x86_capability))
    {
        if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) ||
            (boot_cpu_data.cpuid_level < 1) ||
            ((cpuid_edx(1) & (1 << 16)) == 0) ||
            (boot_cpu_data.x86 != 6) || (boot_cpu_data.x86_model >= 15))
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: CPU does not support the PAT, falling back to MTRRs.\n");
            return NV_PAT_MODE_DISABLED;
        }
    }

    NV_READ_PAT_ENTRIES(pat1, pat2);
    PAT_WC_index = 0xf;

    for (i = 0; i < 4; i++)
    {
        if (NV_PAT_ENTRY(pat1, i) == 0x01)
        {
            PAT_WC_index = i;
            break;
        }

        if (NV_PAT_ENTRY(pat2, i) == 0x01)
        {
            PAT_WC_index = (i + 4);
            break;
        }
    }

    if (PAT_WC_index == 1)
        return NV_PAT_MODE_KERNEL;
    else if (PAT_WC_index != 0xf)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: PAT configuration unsupported, falling back to MTRRs.\n");
        return NV_PAT_MODE_DISABLED;
    }
    else
        return NV_PAT_MODE_BUILTIN;
}

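/*
 * To summarize the three outcomes above: if the kernel already placed a
 * WC entry at PAT index 1, the driver simply inherits the kernel's
 * layout; if WC sits at any other index, the layout is one the driver
 * does not understand, so it falls back to MTRRs; and if no WC entry
 * exists at all, the driver installs one itself (NV_PAT_MODE_BUILTIN).
 */
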
static unsigned long orig_pat1, orig_pat2;

static void __nv_setup_pat_entries(void *info)
{
    unsigned long pat1, pat2, cr4;
    unsigned long eflags;

#if defined(CONFIG_HOTPLUG_CPU)
    int cpu = (NvUPtr)info;
    if ((cpu != 0) && (cpu != (int)smp_processor_id()))
        return;
#endif

    NV_SAVE_FLAGS(eflags);
    NV_CLI();
    __nv_disable_caches(&cr4);

    NV_READ_PAT_ENTRIES(pat1, pat2);

    pat1 &= 0xffff00ff;
    pat1 |= 0x00000100;

    NV_WRITE_PAT_ENTRIES(pat1, pat2);

    __nv_enable_caches(cr4);
    NV_RESTORE_FLAGS(eflags);
}

static void __nv_restore_pat_entries(void *info)
{
    unsigned long cr4;
    unsigned long eflags;

#if defined(CONFIG_HOTPLUG_CPU)
    int cpu = (NvUPtr)info;
    if ((cpu != 0) && (cpu != (int)smp_processor_id()))
        return;
#endif

    NV_SAVE_FLAGS(eflags);
    NV_CLI();
    __nv_disable_caches(&cr4);

    NV_WRITE_PAT_ENTRIES(orig_pat1, orig_pat2);

    __nv_enable_caches(cr4);
    NV_RESTORE_FLAGS(eflags);
}
#endif

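/*
 * For reference: the mask/or pair in __nv_setup_pat_entries() rewrites
 * PAT entry 1 (bits 8-15 of the low MSR half) to 0x01, i.e. it turns
 * the second PAT entry into a Write-Combining slot while leaving the
 * other entries untouched.
 */
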
static int __nv_enable_pat_support()
{
#if defined(NV_ENABLE_PAT_SUPPORT)
    unsigned long pat1, pat2;

    if (nv_pat_mode != NV_PAT_MODE_DISABLED)
        return 1;

    nv_pat_mode = __determine_pat_mode();

    switch (nv_pat_mode)
    {
        case NV_PAT_MODE_DISABLED:
            /* avoid the PAT if unavailable/unusable */
            return 0;
        case NV_PAT_MODE_KERNEL:
            /* inherit the kernel's PAT layout */
            return 1;
        case NV_PAT_MODE_BUILTIN:
            /* use builtin code to modify the PAT layout */
            break;
    }

    NV_READ_PAT_ENTRIES(orig_pat1, orig_pat2);
    nv_printf(NV_DBG_SETUP, "saved orig pats as 0x%lx 0x%lx\n", orig_pat1, orig_pat2);

    if (nv_execute_on_all_cpus(__nv_setup_pat_entries, NULL) != 0)
    {
        nv_execute_on_all_cpus(__nv_restore_pat_entries, NULL);
        return 0;
    }

    NV_READ_PAT_ENTRIES(pat1, pat2);
    nv_printf(NV_DBG_SETUP, "changed pats to 0x%lx 0x%lx\n", pat1, pat2);
#endif
    return 1;
}

static void __nv_disable_pat_support()
{
#if defined(NV_ENABLE_PAT_SUPPORT)
    unsigned long pat1, pat2;

    if (nv_pat_mode != NV_PAT_MODE_BUILTIN)
        return;

    if (nv_execute_on_all_cpus(__nv_restore_pat_entries, NULL) != 0)
        return;

    nv_pat_mode = NV_PAT_MODE_DISABLED;

    NV_READ_PAT_ENTRIES(pat1, pat2);
    nv_printf(NV_DBG_SETUP, "restored orig pats as 0x%lx 0x%lx\n", pat1, pat2);
#endif
}

#if defined(NV_CHANGE_PAGE_ATTR_BUG_PRESENT)
/*
 * nv_verify_cpa_interface() - determine if the change_page_attr() large page
 * management accounting bug known to exist in early Linux/x86-64 kernels
 * is present in this kernel.
 *
 * There's really no good way to determine if change_page_attr() is working
 * correctly. We can't reliably use change_page_attr() on Linux/x86-64 2.6
 * kernels < 2.6.11: if we run into the accounting bug, the Linux kernel will
 * trigger a BUG() if we attempt to restore the WB memory type of a page
 * originally part of a large page.
 *
 * So if we can successfully allocate such a page, change its memory type to
 * UC and check if the accounting was done correctly, we can determine if
 * the change_page_attr() interface can be used safely.
 *
 * Return values:
 *    0 - test passed, the change_page_attr() interface works
 *    1 - test failed, the status is unclear
 *   -1 - test failed, the change_page_attr() interface is broken
 */

static inline pte_t *check_large_page(unsigned long vaddr)
{
    pgd_t *pgd = NULL;
    pmd_t *pmd = NULL;

    pgd = NV_PGD_OFFSET(vaddr, 1, NULL);
    if (!NV_PGD_PRESENT(pgd))
        return NULL;

    pmd = NV_PMD_OFFSET(vaddr, pgd);
    if (!pmd || pmd_none(*pmd))
        return NULL;

    if (!pmd_large(*pmd))
        return NULL;

    return (pte_t *) pmd;
}

#define CPA_FIXED_MAX_ALLOCS 500

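/*
 * Note: for a 2M/4M large page there is no PTE level; the PMD entry
 * itself carries the page protection bits (with _PAGE_PSE set), which
 * is why check_large_page() can return the pmd_t cast to a pte_t.
 */
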
int nv_verify_cpa_interface(void)
{
    unsigned int i, size;
    unsigned long large_page = 0;
    unsigned long *vaddr_list;
    size = sizeof(unsigned long) * CPA_FIXED_MAX_ALLOCS;

    NV_KMALLOC(vaddr_list, size);
    if (!vaddr_list)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: nv_verify_cpa_interface: failed to allocate "
            "page table\n");
        return 1;
    }

    memset(vaddr_list, 0, size);

    /* try to track down an allocation from a 2M page. */
    for (i = 0; i < CPA_FIXED_MAX_ALLOCS; i++)
    {
        vaddr_list[i] = __get_free_page(GFP_KERNEL);
        if (!vaddr_list[i])
            continue;

#if defined(_PAGE_NX)
        if ((pgprot_val(PAGE_KERNEL) & _PAGE_NX) &&
                virt_to_phys((void *)vaddr_list[i]) < 0x400000)
            continue;
#endif

        if (check_large_page(vaddr_list[i]) != NULL)
        {
            large_page = vaddr_list[i];
            vaddr_list[i] = 0;
            break;
        }
    }

    for (i = 0; i < CPA_FIXED_MAX_ALLOCS; i++)
    {
        if (vaddr_list[i])
            free_page(vaddr_list[i]);
    }
    NV_KFREE(vaddr_list, size);

    if (large_page)
    {
        struct page *page = virt_to_page(large_page);
        struct page *kpte_page;
        pte_t *kpte;
        unsigned long kpte_val;
        pgprot_t prot;

        // lookup a pointer to our pte
        kpte = check_large_page(large_page);
        kpte_val = pte_val(*kpte);
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);

        prot = PAGE_KERNEL_NOCACHE;
        pgprot_val(prot) &= __nv_supported_pte_mask;

        // this should split the large page
        change_page_attr(page, 1, prot);

        // broken kernels may get confused after splitting the page and
        // restore the page before returning to us. detect that case.
        if (((pte_val(*kpte) & ~_PAGE_NX) == kpte_val) &&
            (pte_val(*kpte) & _PAGE_PSE))
        {
            if ((pte_val(*kpte) & _PAGE_NX) &&
                    (__nv_supported_pte_mask & _PAGE_NX) == 0)
                clear_bit(_PAGE_BIT_NX, kpte);
            // don't change the page back, as it's already been reverted
            put_page(kpte_page);
            free_page(large_page);
            return -1; // yep, we're broken
        }

        // ok, now see if our bookkeeping is broken
        if (page_count(kpte_page) != 0)
            return -1; // yep, we're broken

        prot = PAGE_KERNEL;
        pgprot_val(prot) &= __nv_supported_pte_mask;

        // everything's ok!
        change_page_attr(page, 1, prot);
        free_page(large_page);
        return 0;
    }

    return 1;
}
#endif /* defined(NV_CHANGE_PAGE_ATTR_BUG_PRESENT) */

/*
 * nv_verify_page_mappings() - verify that the kernel mapping of the specified
 * page matches the specified type. This is to help detect bugs in the Linux
 * kernel's change_page_attr() interface, early.
 *
 * This function relies on the ability to perform kernel virtual address to PFN
 * translations and therefore on 'init_mm'. Unfortunately, the latter is no
 * longer exported in recent Linux/x86 2.6 kernels. The export was removed at
 * roughly the same time as the set_pages_{uc,wb}() change_page_attr()
 * replacement interfaces were introduced; hopefully, it will be sufficient to
 * check for their presence.
 */
int nv_verify_page_mappings(
    nv_pte_t *page_ptr,
    unsigned int cachetype
)
{
#if defined(NV_CHANGE_PAGE_ATTR_PRESENT)
    unsigned long retval = -1;
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
    pgd_t *pgd = NULL;
    pmd_t *pmd = NULL;
    pte_t *pte = NULL;
    unsigned int flags, expected;
    unsigned long address;
    static int count = 0;

    if (!nv_update_memory_types)
        return 0;

    address = (unsigned long)__va(page_ptr->phys_addr);

    pgd = NV_PGD_OFFSET(address, 1, NULL);
    if (!NV_PGD_PRESENT(pgd))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: pgd not present for addr 0x%lx\n", address);
        goto failed;
    }

    pmd = NV_PMD_OFFSET(address, pgd);
    if (!pmd || pmd_none(*pmd))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: pmd not present for addr 0x%lx\n", address);
        goto failed;
    }

    // account for large pages
    if (pmd_large(*pmd))
    {
        pte = (pte_t *)pmd;
        flags = pte_val(*pte) & ~(PAGE_MASK|_PAGE_PSE);
        NV_PMD_UNMAP(pmd);
    }
    else
    {
        pte = NV_PTE_OFFSET(address, pmd);
        if (!NV_PTE_PRESENT(pte))
        {
            nv_printf(NV_DBG_ERRORS, "NVRM: pte not present for addr 0x%lx\n",
                address);
            goto failed;
        }
        flags = NV_PTE_VALUE(pte) & ~(PAGE_MASK|_PAGE_PSE);
    }

    switch (cachetype)
    {
        case NV_MEMORY_CACHED:
            expected = pgprot_val(PAGE_KERNEL);
            if ((flags & ~_PAGE_NX) == (expected & ~_PAGE_NX))
                retval = 0;
            break;
        default:
            expected = pgprot_val(PAGE_KERNEL_NOCACHE);
            if ((flags & ~(_PAGE_NX | _PAGE_PWT)) == (expected & ~(_PAGE_NX | _PAGE_PWT)))
                retval = 0;
            break;
    }

    if (retval)
    {
        if (count < NV_MAX_RECURRING_WARNING_MESSAGES)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: bad caching on address 0x%lx: actual 0x%x != expected 0x%x\n",
                address, flags, expected);
        }

        if (count == 0)
        {
            nv_printf(NV_DBG_ERRORS, "NVRM: please see the README section on "
                "Cache Aliasing for more information\n");
        }

        count++;
    }

failed:
#endif /* defined(NVCPU_X86) || defined(NVCPU_X86_64) */
    return retval;
#else
    return 0;
#endif
}

#if defined(NV_ENABLE_PAT_SUPPORT) && defined(CONFIG_HOTPLUG_CPU)
static int
nv_kern_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = get_cpu();

    switch (action)
    {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
            if (cpu == (NvUPtr)hcpu)
                __nv_setup_pat_entries(NULL);
            else
                NV_SMP_CALL_FUNCTION(__nv_setup_pat_entries, hcpu, 1);
            break;
        case CPU_DOWN_PREPARE:
            if (cpu == (NvUPtr)hcpu)
                __nv_restore_pat_entries(NULL);
            else
                NV_SMP_CALL_FUNCTION(__nv_restore_pat_entries, hcpu, 1);
            break;
    }

    put_cpu();

    return NOTIFY_OK;
}

static struct notifier_block nv_hotcpu_nfb = {
    .notifier_call = nv_kern_cpu_callback,
    .priority = 0
};
#endif

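/*
 * The PAT is per-CPU state, so a CPU brought online after the module
 * loaded would otherwise run with the unmodified PAT layout; the
 * notifier above re-applies (or restores) the driver's entries as CPUs
 * come and go.
 */
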
/***
 *** EXPORTS to Linux Kernel
 ***/

static int __init nvidia_init_module(void)
{
    int rc, disable_pat = 0;
    U032 i, count, data;
    nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device);

#if defined(VM_CHECKER)
    NV_SPIN_LOCK_INIT(&vm_lock);
#endif
#if defined(KM_CHECKER)
    NV_SPIN_LOCK_INIT(&km_lock);
#endif

    count = nvos_count_devices();
    if (count == 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA graphics adapter found!\n");
        return -ENODEV;
    }

    if (!rm_init_rm())
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_rm() failed!\n");
        return -EIO;
    }

    memset(nv_linux_devices, 0, sizeof(nv_linux_devices));

    if (pci_register_driver(&nv_pci_driver) < 0)
    {
        pci_unregister_driver(&nv_pci_driver); // XXX ???
        rm_shutdown_rm();
        nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA graphics adapter found!\n");
        return -ENODEV;
    }

    if (num_probed_nv_devices != count)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: The NVIDIA probe routine was not called for %d device(s).\n",
            count - num_probed_nv_devices);
        nv_printf(NV_DBG_ERRORS,
            "NVRM: This can occur when a driver such as nouveau, rivafb,\n"
            "NVRM: nvidiafb, or rivatv was loaded and obtained ownership of\n"
            "NVRM: the NVIDIA device(s).\n");
        nv_printf(NV_DBG_ERRORS,
            "NVRM: Try unloading the conflicting kernel module (and/or\n"
            "NVRM: reconfigure your kernel without the conflicting\n"
            "NVRM: driver(s)), then try loading the NVIDIA kernel module\n"
            "NVRM: again.\n");
    }

    if (num_probed_nv_devices == 0)
    {
        pci_unregister_driver(&nv_pci_driver);
        rm_shutdown_rm();
        nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA graphics adapter probed!\n");
        return -ENODEV;
    }

    if (num_probed_nv_devices != num_nv_devices)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: The NVIDIA probe routine failed for %d device(s).\n",
            num_probed_nv_devices - num_nv_devices);
    }

    if (num_nv_devices == 0)
    {
        pci_unregister_driver(&nv_pci_driver);
        rm_shutdown_rm();
        nv_printf(NV_DBG_ERRORS,
            "NVRM: None of the NVIDIA graphics adapters were initialized!\n");
        return -ENODEV;
    }

    nv_printf(NV_DBG_ERRORS, "NVRM: loading %s", pNVRM_ID);
    if (__nv_patches[0].short_description != NULL)
    {
        nv_printf(NV_DBG_ERRORS,
            " (applied patches: %s", __nv_patches[0].short_description);
        for (i = 1; __nv_patches[i].short_description; i++)
        {
            nv_printf(NV_DBG_ERRORS,
                ",%s", __nv_patches[i].short_description);
        }
        nv_printf(NV_DBG_ERRORS, ")");
    }
    nv_printf(NV_DBG_ERRORS, "\n");

    rc = register_chrdev(nv_major, "nvidia", &nv_fops);
    if (rc < 0)
    {
        pci_unregister_driver(&nv_pci_driver);
        rm_shutdown_rm();
        nv_printf(NV_DBG_ERRORS, "NVRM: register_chrdev() failed!\n");
        return rc;
    }

    /* instantiate tasklets */
    for (i = 0; i < num_nv_devices; i++)
    {
        /*
         * We keep one tasklet per card to avoid latency issues with more
         * than one device; no two instances of a single tasklet are ever
         * executed concurrently.
         */
        NV_ATOMIC_SET(nv_linux_devices[i].tasklet.count, 1);
    }

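    /*
     * Note: a tasklet whose 'count' is non-zero is disabled; initializing
     * count to 1 therefore leaves each per-device tasklet disabled until
     * the device's interrupt path enables it.
     */
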
    // init the nvidia control device
    {
        nv_state_t *nv_ctl = NV_STATE_PTR(&nv_ctl_device);
        nv_ctl->os_state = (void *) &nv_ctl_device;
        nv_lock_init_locks(nv_ctl);
    }

#if defined(NV_PM_SUPPORT_OLD_STYLE_APM)
    for (i = 0; i < num_nv_devices; i++)
    {
        apm_nv_dev[i] = pm_register(PM_PCI_DEV, PM_SYS_VGA, nv_kern_apm_event);
    }
#endif

    NV_KMEM_CACHE_CREATE(nv_pte_t_cache, "nv_pte_t", nv_pte_t);
    if (nv_pte_t_cache == NULL)
    {
        rc = -ENOMEM;
        nv_printf(NV_DBG_ERRORS, "NVRM: pte cache allocation failed\n");
        goto failed;
    }

#if defined(NV_SG_MAP_BUFFERS) && defined(NV_NEED_REMAP_CHECK)
    rm_read_registry_dword(NV_STATE_PTR(&nv_ctl_device), "NVreg", "RemapLimit", &nv_remap_limit);

    // allow an override, but fall back to the default if none was given
    if (nv_remap_limit == 0)
        nv_remap_limit = NV_REMAP_LIMIT_DEFAULT;

    nv_remap_count = 0;
#endif

#if defined(NVCPU_X86_64) || (defined(NVCPU_X86) && defined(CONFIG_X86_PAE))
    if (boot_cpu_has(X86_FEATURE_NX))
    {
        U032 __eax, __edx;
        rdmsr(MSR_EFER, __eax, __edx);
        if ((__eax & EFER_NX) != 0)
            __nv_supported_pte_mask |= _PAGE_NX;
    }
    if (_PAGE_NX != ((NvU64)1<<63))
    {
        /*
         * Make sure we don't strip software no-execute
         * bits from PAGE_KERNEL(_NOCACHE) before calling
         * change_page_attr().
         */
        __nv_supported_pte_mask |= _PAGE_NX;
    }
#endif

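    /*
     * Note: in PAE/long-mode page tables, _PAGE_NX is bit 63 of the PTE,
     * and it is only honored once the NX enable bit is set in the EFER
     * MSR, hence the rdmsr() check above; on kernels where _PAGE_NX is a
     * software-defined bit instead, the mask must keep it so legitimate
     * protection bits survive.
     */
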
    /* create /proc/driver/nvidia */
    nvos_proc_create();

    /*
     * Give users an opportunity to disable the driver's use of
     * the change_page_attr() and set_pages_{uc,wb}() kernel
     * interfaces.
     */
    rc = rm_read_registry_dword(nv, "NVreg", "UpdateMemoryTypes", &data);
    if ((rc == 0) && ((int)data != ~0))
    {
        nv_update_memory_types = data;
    }
#if defined(NV_CHANGE_PAGE_ATTR_BUG_PRESENT)
    /*
     * Unless we explicitly detect that the change_page_attr()
     * interface is fixed, disable usage of the interface on
     * this kernel. Notify the user of this problem using the
     * driver's /proc warnings interface (read by the installer
     * and the bug report script).
     */
    else
    {
        rc = nv_verify_cpa_interface();
        if (rc < 0)
        {
            nv_prints(NV_DBG_ERRORS, __cpgattr_warning);
            nvos_proc_add_text_file(proc_nvidia_warnings, "change_page_attr",
                                    __cpgattr_warning);
            nv_update_memory_types = 0;
        }
        else if (rc != 0)
        {
            nv_prints(NV_DBG_ERRORS, __cpgattr_warning_2);
            nvos_proc_add_text_file(proc_nvidia_warnings, "change_page_attr",
                                    __cpgattr_warning_2);
            nv_update_memory_types = 0;
        }
    }
#endif /* defined(NV_CHANGE_PAGE_ATTR_BUG_PRESENT) */

#if defined(NVCPU_X86_64) && defined(CONFIG_IA32_EMULATION) && \
  !defined(NV_FILE_OPERATIONS_HAS_COMPAT_IOCTL)
    rm_register_ioctl_conversions();
#endif

    nvos_proc_add_text_file(proc_nvidia_warnings, "README", __README_warning);

    for (i = 0; __nv_patches[i].short_description; i++)
    {
        nvos_proc_add_text_file(proc_nvidia_patches,
            __nv_patches[i].short_description, __nv_patches[i].description);
    }

    nvos_proc_add_text_file(proc_nvidia_patches, "README", __README_patches);

    rc = rm_read_registry_dword(nv,
        "NVreg", "UsePageAttributeTable", &data);
    if ((rc == 0) && ((int)data != ~0))
    {
        disable_pat = (data == 0);
    }

    if (!disable_pat)
    {
        __nv_enable_pat_support();
#if defined(NV_ENABLE_PAT_SUPPORT) && defined(CONFIG_HOTPLUG_CPU)
        if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
        {
            if (register_hotcpu_notifier(&nv_hotcpu_nfb) != 0)
            {
                __nv_disable_pat_support();
                rc = -EIO;
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: CPU hotplug notifier registration failed!\n");
                goto failed;
            }
        }
#endif
    }
    else
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: builtin PAT support disabled, falling back to MTRRs.\n");
    }

#if (defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)) && defined(KERNEL_2_4)
    // check whether the i2c-core entry points are available; if not,
    // attempt to load the module providing them
    if ((!i2c_add_adapter) || (!i2c_del_adapter))
    {
        // attempt to load the module
        request_module("i2c-core");

        // recheck for valid addresses
        if ((!i2c_add_adapter) || (!i2c_del_adapter))
        {
            nv_printf(NV_DBG_ERRORS, "NVRM: Your Linux 2.4 kernel was configured to include modular\n");
            nv_printf(NV_DBG_ERRORS, "NVRM: support for the Linux/i2c infrastructure, but the NVIDIA\n");
            nv_printf(NV_DBG_ERRORS, "NVRM: Linux graphics driver was unable to locate and load the\n");
            nv_printf(NV_DBG_ERRORS, "NVRM: i2c-core.o kernel module.\n");
            nv_printf(NV_DBG_ERRORS, "NVRM: \n");
            nv_printf(NV_DBG_ERRORS, "NVRM: If you wish to take advantage of the NVIDIA driver's i2c\n");
            nv_printf(NV_DBG_ERRORS, "NVRM: support feature, please make sure the Linux/i2c kernel\n");
            nv_printf(NV_DBG_ERRORS, "NVRM: modules are installed correctly.\n");
        }
    }
#endif

    return 0;

failed:
    if (nv_pte_t_cache != NULL)
        NV_KMEM_CACHE_DESTROY(nv_pte_t_cache);

#if defined(NV_PM_SUPPORT_OLD_STYLE_APM)
    for (i = 0; i < num_nv_devices; i++)
        if (apm_nv_dev[i] != NULL) pm_unregister(apm_nv_dev[i]);
#endif

    unregister_chrdev(nv_major, "nvidia");

    for (i = 0; i < num_nv_devices; i++)
    {
        if (nv_linux_devices[i].dev)
        {
            struct pci_dev *dev = nv_linux_devices[i].dev;
            release_mem_region(NV_PCI_RESOURCE_START(dev, NV_GPU_BAR_INDEX_REGS),
                               NV_PCI_RESOURCE_SIZE(dev, NV_GPU_BAR_INDEX_REGS));
            NV_PCI_DISABLE_DEVICE(dev);
        }
    }

    pci_unregister_driver(&nv_pci_driver);
    rm_shutdown_rm();

    return rc;
}

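/*
 * For reference: i2c_add_adapter/i2c_del_adapter were declared with
 * __attribute__((weak)) at the top of this file, so if i2c-core is not
 * resident the symbols resolve to NULL; this is what lets the address
 * checks in nvidia_init_module() detect (and, after request_module(),
 * re-test) their availability.
 */
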
static void __exit nvidia_exit_module(void)
{
    U032 i;
    nv_linux_state_t *nvl;
    nv_state_t *nv;

    nv_printf(NV_DBG_INFO, "NVRM: nvidia_exit_module\n");

    unregister_chrdev(nv_major, "nvidia");

    for (i = 0; i < num_nv_devices; i++)
    {
        struct pci_dev *dev;
        nvl = &nv_linux_devices[i];

        if ((dev = nvl->dev) != NULL)
        {
            rm_i2c_remove_adapters(NV_STATE_PTR(nvl));

            rm_free_private_state(NV_STATE_PTR(nvl));
            release_mem_region(NV_PCI_RESOURCE_START(dev, NV_GPU_BAR_INDEX_REGS),
                               NV_PCI_RESOURCE_SIZE(dev, NV_GPU_BAR_INDEX_REGS));
            NV_PCI_DISABLE_DEVICE(dev);
        }
    }

    pci_unregister_driver(&nv_pci_driver);

    /* remove /proc/driver/nvidia */
    nvos_proc_remove();

#if defined(NV_PM_SUPPORT_OLD_STYLE_APM)
    for (i = 0; i < num_nv_devices; i++)
    {
        if (apm_nv_dev[i] != NULL) pm_unregister(apm_nv_dev[i]);
    }
#endif

    /*
     * Make sure we freed up all the mappings. The kernel should
     * do this automatically before calling close.
     */
    for (i = 0; i < num_nv_devices; i++)
    {
        nvl = &nv_linux_devices[i];
        nv = NV_STATE_PTR(nvl);

        if (nvl->alloc_queue)
        {
            nv_alloc_t *at = nvl->alloc_queue, *next;
            while (at)
            {
                NV_PRINT_AT(NV_DBG_ERRORS, at);
                /* nv_free_pages() will free this 'at' */
                next = at->next;
                nv_free_pages(nv, at->num_pages,
                              NV_ALLOC_MAPPING_AGP(at->flags),
                              NV_ALLOC_MAPPING_CONTIG(at->flags),
                              NV_ALLOC_MAPPING(at->flags),
                              (void *)at);
                at = next;
            }
        }
    }

    // Shutdown the resource manager
    rm_shutdown_rm();

#if defined(NVCPU_X86_64) && defined(CONFIG_IA32_EMULATION) && \
  !defined(NV_FILE_OPERATIONS_HAS_COMPAT_IOCTL)
    rm_unregister_ioctl_conversions();
#endif

    if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
    {
        __nv_disable_pat_support();
#if defined(NV_ENABLE_PAT_SUPPORT) && defined(CONFIG_HOTPLUG_CPU)
        unregister_hotcpu_notifier(&nv_hotcpu_nfb);
#endif
    }

#if defined(NV_ENABLE_MEM_TRACKING)
#if defined(VM_CHECKER)
    if (vm_usage != 0)
    {
        nv_list_mem("VM", vm_list);
        nv_printf(NV_DBG_ERRORS,
            "NVRM: final VM memory usage: 0x%x bytes\n", vm_usage);
    }
#endif
#if defined(KM_CHECKER)
    if (km_usage != 0)
    {
        nv_list_mem("KM", km_list);
        nv_printf(NV_DBG_ERRORS,
            "NVRM: final KM memory usage: 0x%x bytes\n", km_usage);
    }
#endif
#if defined(NV_SG_MAP_BUFFERS) && defined(NV_NEED_REMAP_CHECK)
    if (nv_remap_count != 0)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: final SG memory usage: 0x%x bytes\n", nv_remap_count);
    }
#endif
#endif /* NV_ENABLE_MEM_TRACKING */

    NV_KMEM_CACHE_DESTROY(nv_pte_t_cache);
}

module_init(nvidia_init_module);
module_exit(nvidia_exit_module);

/*
 * The 'struct vm_operations' open() callback is called by the Linux
 * kernel when the parent VMA is split or copied, close() when the
 * current VMA is about to be deleted.
 *
 * We implement these callbacks to keep track of the number of user
 * mappings of system memory allocations. This was motivated by a
 * subtle interaction problem between the driver and the kernel with
 * respect to the bookkeeping of pages marked reserved and later
 * mapped with mmap().
 *
 * Traditionally, the Linux kernel ignored reserved pages, such that
 * when they were mapped via mmap(), the integrity of their usage
 * counts depended on the reserved bit being set for as long as user
 * mappings existed.
 *
 * Since we mark system memory pages allocated for DMA reserved and
 * typically map them with mmap(), we need to ensure they remain
 * reserved until the last mapping has been torn down. This worked
 * correctly in most cases, but in a few, the RM API called into the
 * RM to free memory before calling munmap() to unmap it.
 *
 * In the past, we allowed nv_free_pages() to remove the 'at' from
 * the parent device's allocation list in this case, but didn't
 * release the underlying pages until the last user mapping had been
 * destroyed:
 *
 * In nv_kern_vma_release(), we freed any resources associated with
 * the allocation (IOMMU/SWIOTLB mappings, etc.) and cleared the
 * underlying pages' reserved bits, but didn't free them. The kernel
 * was expected to do this.
 *
 * This worked in practice, but made dangerous assumptions about the
 * kernel's behavior and could fail in some cases. We now handle
 * this case differently (see below).
 */
static void
nv_kern_vma_open(struct vm_area_struct *vma)
{
    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);

    if (NV_VMA_PRIVATE(vma))
    {
        nv_alloc_t *at = (nv_alloc_t *) NV_VMA_PRIVATE(vma);
        NV_ATOMIC_INC(at->usage_count);

        if (!NV_ALLOC_MAPPING_AGP(at->flags))
        {
            NV_PRINT_AT(NV_DBG_MEMINFO, at);
            nv_vm_list_page_count(at->page_table, at->num_pages);
        }
    }
}

1720 |
/* |
1721 |
* (see above for additional information) |
1722 |
* |
1723 |
* If the 'at' usage count drops to zero with the updated logic, the |
1724 |
* VMA's file pointer is saved; nv_kern_close() uses it to find |
1725 |
* these allocations when the parent file descriptor is closed. This |
1726 |
* will typically happen when the process exits. |
1727 |
* |
1728 |
* Since this is technically a workaround to handle possible fallout |
1729 |
* from misbehaving clients, we addtionally print a warning. |
1730 |
*/ |
1731 |
static void |
1732 |
nv_kern_vma_release(struct vm_area_struct *vma) |
1733 |
{ |
1734 |
NV_PRINT_VMA(NV_DBG_MEMINFO, vma); |
1735 |
|
1736 |
if (NV_VMA_PRIVATE(vma)) |
1737 |
{ |
1738 |
nv_alloc_t *at = (nv_alloc_t *) NV_VMA_PRIVATE(vma); |
1739 |
|
1740 |
if (NV_ATOMIC_DEC_AND_TEST(at->usage_count)) |
1741 |
{ |
1742 |
static int count = 0; |
1743 |
if ((at->pid == os_get_current_process()) && |
1744 |
(count++ < NV_MAX_RECURRING_WARNING_MESSAGES)) |
1745 |
{ |
1746 |
nv_printf(NV_DBG_MEMINFO, |
1747 |
"NVRM: VM: nv_kern_vma_release: late unmap, comm: %s, 0x%p\n", |
1748 |
current->comm, at); |
1749 |
} |
1750 |
at->file = NV_VMA_FILE(vma); |
1751 |
} |
1752 |
|
1753 |
if (!NV_ALLOC_MAPPING_AGP(at->flags)) |
1754 |
{ |
1755 |
NV_PRINT_AT(NV_DBG_MEMINFO, at); |
1756 |
nv_vm_list_page_count(at->page_table, at->num_pages); |
1757 |
} |
1758 |
} |
1759 |
} |
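/*
 * Illustrative sketch (compiled out): the minimal shape of the VMA
 * reference counting scheme described above. The names used here
 * (example_alloc_t, example_vm_open/close) are hypothetical; the real
 * bookkeeping is done by nv_kern_vma_open()/nv_kern_vma_release().
 */
#if 0
typedef struct example_alloc_s
{
    atomic_t usage_count;  /* one reference per live user mapping */
} example_alloc_t;

static void example_vm_open(struct vm_area_struct *vma)
{
    /* called when the kernel splits or copies the VMA (e.g. fork()) */
    example_alloc_t *a = vma->vm_private_data;
    atomic_inc(&a->usage_count);
}

static void example_vm_close(struct vm_area_struct *vma)
{
    example_alloc_t *a = vma->vm_private_data;
    /* last user mapping gone: only now is it safe to free the pages */
    if (atomic_dec_and_test(&a->usage_count))
        ; /* defer the actual free to the file descriptor's release */
}
#endif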
1760 |
|
1761 |
#if !defined(NV_VM_INSERT_PAGE_PRESENT) |
1762 |
static |
1763 |
struct page *nv_kern_vma_nopage( |
1764 |
struct vm_area_struct *vma, |
1765 |
unsigned long address, |
1766 |
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) |
1767 |
int *type |
1768 |
#else |
1769 |
int write_access |
1770 |
#endif |
1771 |
) |
1772 |
{ |
1773 |
struct page *page; |
1774 |
|
1775 |
page = pfn_to_page(vma->vm_pgoff); |
1776 |
get_page(page); |
1777 |
|
1778 |
return page; |
1779 |
} |
1780 |
#endif |
1781 |
|
1782 |
struct vm_operations_struct nv_vm_ops = { |
1783 |
.open = nv_kern_vma_open, |
1784 |
.close = nv_kern_vma_release, /* "close" */ |
1785 |
#if !defined(NV_VM_INSERT_PAGE_PRESENT) |
1786 |
.nopage = nv_kern_vma_nopage, |
1787 |
#endif |
1788 |
}; |
1789 |
|
1790 |
static nv_file_private_t * |
1791 |
nv_alloc_file_private(void) |
1792 |
{ |
1793 |
nv_file_private_t *nvfp; |
1794 |
|
1795 |
NV_KMALLOC(nvfp, sizeof(nv_file_private_t)); |
1796 |
if (!nvfp) |
1797 |
return NULL; |
1798 |
|
1799 |
memset(nvfp, 0, sizeof(nv_file_private_t)); |
1800 |
|
1801 |
init_waitqueue_head(&nvfp->waitqueue); |
1802 |
NV_SPIN_LOCK_INIT(&nvfp->fp_lock); |
1803 |
|
1804 |
return nvfp; |
1805 |
} |
1806 |
|
1807 |
static void |
1808 |
nv_free_file_private(nv_file_private_t *nvfp) |
1809 |
{ |
1810 |
nvidia_event_t *nvet; |
1811 |
|
1812 |
if (nvfp == NULL) |
1813 |
return; |
1814 |
|
1815 |
for (nvet = nvfp->event_head; nvet != NULL; nvet = nvfp->event_head) |
1816 |
{ |
1817 |
nvfp->event_head = nvfp->event_head->next; |
1818 |
NV_KFREE(nvet, sizeof(nvidia_event_t)); |
1819 |
} |
1820 |
NV_KFREE(nvfp, sizeof(nv_file_private_t)); |
1821 |
} |
1822 |
|
1823 |
|
1824 |
/* |
1825 |
** nv_kern_open |
1826 |
** |
1827 |
** nv driver open entry point. Sessions are created here. |
1828 |
*/ |
1829 |
int nv_kern_open( |
1830 |
struct inode *inode, |
1831 |
struct file *file |
1832 |
) |
1833 |
{ |
1834 |
nv_state_t *nv = NULL; |
1835 |
nv_linux_state_t *nvl = NULL; |
1836 |
U032 devnum; |
1837 |
int rc = 0, status; |
1838 |
|
1839 |
nv_printf(NV_DBG_INFO, "NVRM: nv_kern_open...\n"); |
1840 |
|
1841 |
FILE_PRIVATE(file) = nv_alloc_file_private(); |
1842 |
if (FILE_PRIVATE(file) == NULL) |
1843 |
return -ENOMEM; |
1844 |
|
1845 |
/* for control device, just jump to its open routine */ |
1846 |
/* after setting up the private data */ |
1847 |
if (NV_IS_CONTROL_DEVICE(inode)) |
1848 |
return nv_kern_ctl_open(inode, file); |
1849 |
|
1850 |
/* what device are we talking about? */ |
1851 |
devnum = NV_DEVICE_NUMBER(inode); |
1852 |
if (devnum >= num_nv_devices) |
1853 |
{ |
1854 |
nv_free_file_private(FILE_PRIVATE(file)); |
1855 |
FILE_PRIVATE(file) = NULL; |
1856 |
return -ENODEV; |
1857 |
} |
1858 |
|
1859 |
nvl = &nv_linux_devices[devnum]; |
1860 |
nv = NV_STATE_PTR(nvl); |
1861 |
|
1862 |
nv_printf(NV_DBG_INFO, "NVRM: nv_kern_open on device %d\n", devnum); |
1863 |
down(&nvl->ldata_lock); |
1864 |
|
1865 |
nv_verify_pci_config(nv); |
1866 |
|
1867 |
NVL_FROM_FILEP(file) = nvl; |
1868 |
|
1869 |
/* |
1870 |
* map the memory and install the ISR on first open
1871 |
*/ |
1872 |
|
1873 |
if ( ! (nv->flags & NV_FLAG_OPEN)) |
1874 |
{ |
1875 |
if (nv->device_id == 0) |
1876 |
{ |
1877 |
nv_printf(NV_DBG_ERRORS, "NVRM: open of nonexistent device %d\n", |
1878 |
devnum); |
1879 |
rc = -ENXIO; |
1880 |
goto failed; |
1881 |
} |
1882 |
|
1883 |
status = request_irq(nv->interrupt_line, nv_kern_isr, IRQF_SHARED, |
1884 |
"nvidia", (void *)nvl); |
1885 |
if (status != 0) |
1886 |
{ |
1887 |
if ( nv->interrupt_line && (status == -EBUSY) ) |
1888 |
{ |
1889 |
nv_printf(NV_DBG_ERRORS, |
1890 |
"NVRM: Tried to get irq %d, but another driver", |
1891 |
(unsigned int) nv->interrupt_line); |
1892 |
nv_printf(NV_DBG_ERRORS, "NVRM: has it and is not sharing it.\n"); |
1893 |
nv_printf(NV_DBG_ERRORS, "NVRM: you may want to verify that an audio driver"); |
1894 |
nv_printf(NV_DBG_ERRORS, " isn't using the irq\n"); |
1895 |
} |
1896 |
nv_printf(NV_DBG_ERRORS, "NVRM: isr request failed 0x%x\n", status); |
1897 |
rc = -EIO; |
1898 |
goto failed; |
1899 |
} |
1900 |
|
1901 |
if ( ! rm_init_adapter(nv)) |
1902 |
{ |
1903 |
free_irq(nv->interrupt_line, (void *) nvl); |
1904 |
nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_adapter(%d) failed\n", devnum); |
1905 |
rc = -EIO; |
1906 |
goto failed; |
1907 |
} |
1908 |
|
1909 |
nvl->tasklet.func = nv_kern_isr_bh; |
1910 |
nvl->tasklet.data = (unsigned long) nv; |
1911 |
tasklet_enable(&nvl->tasklet); |
1912 |
|
1913 |
nv->flags |= NV_FLAG_OPEN; |
1914 |
} |
1915 |
|
1916 |
NV_ATOMIC_INC(nvl->usage_count); |
1917 |
|
1918 |
failed: |
1919 |
up(&nvl->ldata_lock); |
1920 |
|
1921 |
if ((rc) && FILE_PRIVATE(file)) |
1922 |
{ |
1923 |
nv_free_file_private(FILE_PRIVATE(file)); |
1924 |
FILE_PRIVATE(file) = NULL; |
1925 |
} |
1926 |
|
1927 |
return rc; |
1928 |
} |
1929 |
|
1930 |
|
1931 |
/* |
1932 |
** nv_kern_close |
1933 |
** |
1934 |
** Master driver close entry point. |
1935 |
*/ |
1936 |
|
1937 |
int nv_kern_close( |
1938 |
struct inode *inode, |
1939 |
struct file *file |
1940 |
) |
1941 |
{ |
1942 |
nv_linux_state_t *nvl = NVL_FROM_FILEP(file); |
1943 |
nv_state_t *nv = NV_STATE_PTR(nvl); |
1944 |
|
1945 |
nv_verify_pci_config(nv); |
1946 |
|
1947 |
/* for the control device, just jump to its close routine */
1949 |
if (NV_IS_CONTROL_DEVICE(inode)) |
1950 |
return nv_kern_ctl_close(inode, file); |
1951 |
|
1952 |
nv_printf(NV_DBG_INFO, "NVRM: nv_kern_close on device %d\n", |
1953 |
NV_DEVICE_NUMBER(inode)); |
1954 |
|
1955 |
rm_free_unused_clients(nv, (void *)file); |
1956 |
|
1957 |
down(&nvl->at_lock); |
1958 |
if (nvl->alloc_queue != NULL) |
1959 |
{ |
1960 |
nv_alloc_t *at = nvl->alloc_queue, *next; |
1961 |
while (at != NULL) |
1962 |
{ |
1963 |
/* nv_free_pages() will free this 'at' */ |
1964 |
next = at->next; |
1965 |
if ((NV_ATOMIC_READ(at->usage_count) == 0) && (at->file == file)) |
1966 |
{ |
1967 |
NV_ATOMIC_INC(at->usage_count); |
1968 |
up(&nvl->at_lock); |
1969 |
if (at->pid == os_get_current_process()) |
1970 |
NV_PRINT_AT(NV_DBG_MEMINFO, at); |
1971 |
nv_free_pages(nv, at->num_pages, |
1972 |
NV_ALLOC_MAPPING_AGP(at->flags), |
1973 |
NV_ALLOC_MAPPING_CONTIG(at->flags), |
1974 |
NV_ALLOC_MAPPING(at->flags), |
1975 |
(void *)at); |
1976 |
down(&nvl->at_lock); |
1977 |
next = nvl->alloc_queue; /* start over */ |
1978 |
} |
1979 |
at = next; |
1980 |
} |
1981 |
} |
1982 |
up(&nvl->at_lock); |
1983 |
|
1984 |
down(&nvl->ldata_lock); |
1985 |
if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count)) |
1986 |
{ |
1987 |
/* |
1988 |
* The usage count for this device has dropped to zero, it can be shut |
1989 |
* down safely; disable its interrupts. |
1990 |
*/ |
1991 |
rm_disable_adapter(nv); |
1992 |
|
1993 |
/* |
1994 |
* Disable this device's tasklet to make sure that no bottom half will |
1995 |
* run with undefined device state. |
1996 |
*/ |
1997 |
tasklet_disable(&nvl->tasklet); |
1998 |
|
1999 |
/* |
2000 |
* Free the IRQ, which may block until all pending interrupt processing |
2001 |
* has completed. |
2002 |
*/ |
2003 |
free_irq(nv->interrupt_line, (void *) nvl); |
2004 |
|
2005 |
rm_shutdown_adapter(nv); |
2006 |
|
2007 |
/* |
2008 |
* Make sure we free all memory tied to this device. Memory freed here |
2009 |
* has been leaked by the core RM, warn accordingly. |
2010 |
*/ |
2011 |
if (nvl->alloc_queue) |
2012 |
{ |
2013 |
nv_alloc_t *at = nvl->alloc_queue, *next; |
2014 |
while (at) |
2015 |
{ |
2016 |
NV_PRINT_AT(NV_DBG_ERRORS, at); |
2017 |
/* nv_free_pages() will free this 'at' */ |
2018 |
next = at->next; |
2019 |
nv_free_pages(nv, at->num_pages, |
2020 |
NV_ALLOC_MAPPING_AGP(at->flags), |
2021 |
NV_ALLOC_MAPPING_CONTIG(at->flags), |
2022 |
NV_ALLOC_MAPPING(at->flags), |
2023 |
(void *)at); |
2024 |
at = next; |
2025 |
} |
2026 |
} |
2027 |
|
2028 |
/* leave INIT flag alone so we don't reinit every time */ |
2029 |
nv->flags &= ~NV_FLAG_OPEN; |
2030 |
} |
2031 |
up(&nvl->ldata_lock); |
2032 |
|
2033 |
if (FILE_PRIVATE(file)) |
2034 |
{ |
2035 |
nv_free_file_private(FILE_PRIVATE(file)); |
2036 |
FILE_PRIVATE(file) = NULL; |
2037 |
} |
2038 |
|
2039 |
return 0; |
2040 |
} |
2041 |
|
2042 |
int nv_encode_caching( |
2043 |
pgprot_t *prot, |
2044 |
unsigned int cache_type, |
2045 |
unsigned int memory_type |
2046 |
) |
2047 |
{ |
2048 |
pgprot_t tmp = __pgprot(0); |
2049 |
|
2050 |
if (prot == NULL) prot = &tmp; |
2051 |
|
2052 |
// allow setting or refusal of specific caching types |
2053 |
switch (cache_type) |
2054 |
{ |
2055 |
case NV_MEMORY_UNCACHED_WEAK: |
2056 |
*prot = pgprot_noncached_weak(*prot); |
2057 |
break; |
2058 |
case NV_MEMORY_UNCACHED: |
2059 |
*prot = pgprot_noncached(*prot); |
2060 |
break; |
2061 |
#if defined(NVCPU_X86) || defined(NVCPU_X86_64) |
2062 |
case NV_MEMORY_WRITECOMBINED: |
2063 |
#if defined(NV_ENABLE_PAT_SUPPORT) |
2064 |
if ((nv_pat_mode != NV_PAT_MODE_DISABLED) && |
2065 |
(memory_type != NV_MEMORY_TYPE_REGISTERS)) |
2066 |
{ |
2067 |
pgprot_val(*prot) &= ~(_PAGE_PSE | _PAGE_PCD | _PAGE_PWT); |
2068 |
*prot = __pgprot(pgprot_val(*prot) | _PAGE_PWT); |
2069 |
break; |
2070 |
} |
2071 |
#endif |
2072 |
/* |
2073 |
* If PAT support is unavailable and the memory space isn't |
2074 |
* NV_MEMORY_TYPE_AGP, we need to return an error code to |
2075 |
* the caller, but do not print a warning message. |
2076 |
* |
2077 |
* In the case of AGP memory, we will have attempted to add |
2078 |
* a WC MTRR for the AGP aperture and aborted the AGP |
2079 |
* initialization if this failed, so we can safely return |
2080 |
* success here. |
2081 |
* |
2082 |
* For frame buffer memory, callers are expected to use the |
2083 |
* UC- memory type if we report WC as unsupported, which |
2084 |
* translates to the effective memory type WC if a WC MTRR |
2085 |
* exists or else UC. |
2086 |
*/ |
2087 |
if (memory_type == NV_MEMORY_TYPE_AGP) |
2088 |
break; |
2089 |
return 1; |
2090 |
#endif |
2091 |
case NV_MEMORY_CACHED: |
2092 |
/* |
2093 |
* RAM is cached on Linux by default, we can assume there's |
2094 |
* nothing to be done here. This is not the case for the |
2095 |
* other memory spaces: as commented on above, we will have |
2096 |
* added a WC MTRR for the AGP aperture (or else aborted |
2097 |
* AGP initialization), and we will have made an attempt to |
2098 |
* add a WC MTRR for the frame buffer. |
2099 |
* |
2100 |
* If a WC MTRR is present, we can't satisfy the WB mapping |
2101 |
* attempt here, since the achievable effective memory |
2102 |
* types in that case are WC and UC, if not it's typically |
2103 |
* UC (MTRRdefType is UC); we could only satisfy WB mapping |
2104 |
* requests with a WB MTRR. |
2105 |
*/ |
2106 |
if (memory_type == NV_MEMORY_TYPE_SYSTEM) |
2107 |
break; |
2108 |
default: |
2109 |
nv_printf(NV_DBG_ERRORS, |
2110 |
"NVRM: VM: memory type %d not supported for memory space %d!\n", |
2111 |
cache_type, memory_type); |
2112 |
return 1; |
2113 |
} |
2114 |
return 0; |
2115 |
} |
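/*
 * Illustrative sketch (compiled out) of the caller-side fallback the
 * comments above describe: request WC first and degrade to UC- when
 * nv_encode_caching() reports WC as unsupported. nv_kern_mmap() below
 * applies exactly this pattern to frame buffer mappings.
 */
#if 0
static int example_pick_fb_caching(pgprot_t *prot)
{
    if (nv_encode_caching(prot, NV_MEMORY_WRITECOMBINED,
                          NV_MEMORY_TYPE_FRAMEBUFFER) == 0)
        return 0; /* PAT path: true write-combining */
    /* UC-: effective type is WC if a WC MTRR covers the BAR, else UC */
    return nv_encode_caching(prot, NV_MEMORY_UNCACHED_WEAK,
                             NV_MEMORY_TYPE_FRAMEBUFFER);
}
#endif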
2116 |
|
2117 |
int nv_kern_mmap( |
2118 |
struct file *file, |
2119 |
struct vm_area_struct *vma |
2120 |
) |
2121 |
{ |
2122 |
unsigned int pages; |
2123 |
nv_alloc_t *at; |
2124 |
nv_linux_state_t *nvl = NVL_FROM_FILEP(file); |
2125 |
nv_state_t *nv = NV_STATE_PTR(nvl); |
2126 |
|
2127 |
if (nv->flags & NV_FLAG_CONTROL) |
2128 |
return -ENODEV; |
2129 |
|
2130 |
NV_PRINT_VMA(NV_DBG_MEMINFO, vma); |
2131 |
|
2132 |
nv_verify_pci_config(nv); |
2133 |
|
2134 |
// be a bit paranoid for now |
2135 |
if ( NV_MASK_OFFSET(vma->vm_start) || |
2136 |
NV_MASK_OFFSET(vma->vm_end)) |
2137 |
{ |
2138 |
nv_printf(NV_DBG_ERRORS, |
2139 |
"NVRM: bad mmap range: %lx - %lx\n", |
2140 |
vma->vm_start, vma->vm_end); |
2141 |
return -EINVAL; |
2142 |
} |
2143 |
|
2144 |
pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT; |
2145 |
NV_VMA_PRIVATE(vma) = NULL; |
2146 |
|
2147 |
vma->vm_ops = &nv_vm_ops; |
2148 |
|
2149 |
#if defined(NVCPU_X86) |
2150 |
if (vma->vm_pgoff & ~0xfffff) |
2151 |
{ |
2152 |
nv_printf(NV_DBG_ERRORS, |
2153 |
"NVRM: bad mmap offset: %lx\n", vma->vm_pgoff); |
2154 |
return -EINVAL; |
2155 |
} |
2156 |
#endif |
2157 |
|
2158 |
/* NV reg space */ |
2159 |
if (IS_REG_OFFSET(nv, NV_VMA_OFFSET(vma), NV_VMA_SIZE(vma))) |
2160 |
{ |
2161 |
if (nv_encode_caching(&vma->vm_page_prot, |
2162 |
NV_MEMORY_UNCACHED, |
2163 |
NV_MEMORY_TYPE_REGISTERS)) |
2164 |
{ |
2165 |
return -ENXIO; |
2166 |
} |
2167 |
|
2168 |
if (NV_REMAP_PAGE_RANGE(vma->vm_start, |
2169 |
NV_VMA_OFFSET(vma), |
2170 |
NV_VMA_SIZE(vma), |
2171 |
vma->vm_page_prot)) |
2172 |
return -EAGAIN; |
2173 |
|
2174 |
/* mark it as IO so that we don't dump it on core dump */ |
2175 |
vma->vm_flags |= VM_IO; |
2176 |
} |
2177 |
|
2178 |
/* NV fb space */ |
2179 |
else if (IS_FB_OFFSET(nv, NV_VMA_OFFSET(vma), NV_VMA_SIZE(vma))) |
2180 |
{ |
2181 |
if (nv_encode_caching(&vma->vm_page_prot, |
2182 |
NV_MEMORY_WRITECOMBINED, |
2183 |
NV_MEMORY_TYPE_FRAMEBUFFER)) |
2184 |
{ |
2185 |
if (nv_encode_caching(&vma->vm_page_prot, |
2186 |
NV_MEMORY_UNCACHED_WEAK, |
2187 |
NV_MEMORY_TYPE_FRAMEBUFFER)) |
2188 |
{ |
2189 |
return -ENXIO; |
2190 |
} |
2191 |
} |
2192 |
|
2193 |
if (NV_REMAP_PAGE_RANGE(vma->vm_start, |
2194 |
NV_VMA_OFFSET(vma), |
2195 |
NV_VMA_SIZE(vma), |
2196 |
vma->vm_page_prot)) |
2197 |
return -EAGAIN; |
2198 |
|
2199 |
// mark it as IO so that we don't dump it on core dump |
2200 |
vma->vm_flags |= VM_IO; |
2201 |
} |
2202 |
|
2203 |
/* NV bc space */ |
2204 |
else if (IS_BC_OFFSET(nv, NV_VMA_OFFSET(vma), NV_VMA_SIZE(vma))) |
2205 |
{ |
2206 |
if (nv_encode_caching(&vma->vm_page_prot, |
2207 |
NV_MEMORY_WRITECOMBINED, |
2208 |
NV_MEMORY_TYPE_FRAMEBUFFER)) |
2209 |
return -ENXIO; |
2210 |
|
2211 |
if (NV_REMAP_PAGE_RANGE(vma->vm_start, |
2212 |
NV_VMA_OFFSET(vma), |
2213 |
NV_VMA_SIZE(vma), |
2214 |
vma->vm_page_prot)) |
2215 |
return -EAGAIN; |
2216 |
|
2217 |
// mark it as IO so that we don't dump it on core dump |
2218 |
vma->vm_flags |= VM_IO; |
2219 |
} |
2220 |
|
2221 |
/* AGP allocator */ |
2222 |
else if (IS_AGP_OFFSET(nv, NV_VMA_OFFSET(vma), NV_VMA_SIZE(vma))) |
2223 |
{ |
2224 |
unsigned int i; |
2225 |
|
2226 |
down(&nvl->at_lock); |
2227 |
at = nvl_find_alloc(nvl, NV_VMA_OFFSET(vma), NV_ALLOC_TYPE_AGP); |
2228 |
|
2229 |
if (at == NULL) |
2230 |
{ |
2231 |
static int count = 0; |
2232 |
if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES) |
2233 |
{ |
2234 |
nv_printf(NV_DBG_ERRORS, |
2235 |
"NVRM: nv_kern_mmap: invalid offset: 0x%08x @ 0x%016llx (AGP)\n", |
2236 |
NV_VMA_SIZE(vma), NV_VMA_OFFSET(vma)); |
2237 |
} |
2238 |
up(&nvl->at_lock); |
2239 |
return -EINVAL; |
2240 |
} |
2241 |
|
2242 |
if (nv_encode_caching(&vma->vm_page_prot, |
2243 |
NV_MEMORY_WRITECOMBINED, |
2244 |
NV_MEMORY_TYPE_AGP)) |
2245 |
{ |
2246 |
up(&nvl->at_lock); |
2247 |
return -ENXIO; |
2248 |
} |
2249 |
|
2250 |
NV_VMA_PRIVATE(vma) = at; |
2251 |
NV_ATOMIC_INC(at->usage_count); |
2252 |
up(&nvl->at_lock); |
2253 |
|
2254 |
if (NV_REMAP_PAGE_RANGE(vma->vm_start, |
2255 |
NV_VMA_OFFSET(vma), |
2256 |
NV_VMA_SIZE(vma), |
2257 |
vma->vm_page_prot)) |
2258 |
{ |
2259 |
NV_ATOMIC_DEC(at->usage_count); |
2260 |
return -EAGAIN; |
2261 |
} |
2262 |
|
2263 |
i = (NV_VMA_OFFSET(vma) - (NvUPtr)at->key_mapping) >> PAGE_SHIFT; |
2264 |
|
2265 |
NV_PRINT_AT(NV_DBG_MEMINFO, at); |
2266 |
nv_vm_list_page_count(&at->page_table[i], pages); |
2267 |
|
2268 |
// mark it as IO so that we don't dump it on core dump |
2269 |
vma->vm_flags |= VM_IO; |
2270 |
} |
2271 |
|
2272 |
/* Magic allocator */ |
2273 |
else // if (NV_VMA_OFFSET(vma) == NV_MMAP_ALLOCATION_OFFSET) |
2274 |
{ |
2275 |
unsigned long start = 0; |
2276 |
unsigned int i, j; |
2277 |
|
2278 |
down(&nvl->at_lock); |
2279 |
at = nvl_find_alloc(nvl, NV_VMA_OFFSET(vma), NV_ALLOC_TYPE_PCI); |
2280 |
|
2281 |
if (at == NULL) |
2282 |
{ |
2283 |
static int count = 0; |
2284 |
up(&nvl->at_lock); |
2285 |
if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES) |
2286 |
{ |
2287 |
nv_printf(NV_DBG_ERRORS, |
2288 |
"NVRM: nv_kern_mmap: invalid offset: 0x%08x @ 0x%016llx (PCI)\n", |
2289 |
NV_VMA_SIZE(vma), NV_VMA_OFFSET(vma)); |
2290 |
} |
2291 |
return -EINVAL; |
2292 |
} |
2293 |
|
2294 |
for (i = 0; i < at->num_pages; i++) |
2295 |
{ |
2296 |
if ((NV_VMA_OFFSET(vma) == at->page_table[i]->phys_addr) |
2297 |
|| (NV_VMA_OFFSET(vma) == at->page_table[i]->dma_addr)) |
2298 |
break; |
2299 |
} |
2300 |
|
2301 |
if (i == at->num_pages) /* sanity check */ |
2302 |
{ |
2303 |
up(&nvl->at_lock); |
2304 |
return -EINVAL; |
2305 |
} |
2306 |
|
2307 |
if ((i + pages) > at->num_pages) |
2308 |
{ |
2309 |
nv_printf(NV_DBG_ERRORS, |
2310 |
"NVRM: requested mapping exceeds allocation's boundary!\n"); |
2311 |
up(&nvl->at_lock); |
2312 |
return -EINVAL; |
2313 |
} |
2314 |
|
2315 |
if (nv_encode_caching(&vma->vm_page_prot, |
2316 |
NV_ALLOC_MAPPING(at->flags), |
2317 |
NV_MEMORY_TYPE_SYSTEM)) |
2318 |
{ |
2319 |
up(&nvl->at_lock); |
2320 |
return -ENXIO; |
2321 |
} |
2322 |
|
2323 |
NV_VMA_PRIVATE(vma) = at; |
2324 |
NV_ATOMIC_INC(at->usage_count); |
2325 |
up(&nvl->at_lock); |
2326 |
|
2327 |
nv_printf(NV_DBG_INFO, |
2328 |
"NVRM: remapping %d system pages, index %d, for 'at' 0x%p\n", pages, i, at); |
2329 |
|
2330 |
start = vma->vm_start; |
2331 |
for (j = i; j < (i + pages); j++) |
2332 |
{ |
2333 |
nv_verify_page_mappings(at->page_table[j], NV_ALLOC_MAPPING(at->flags)); |
2334 |
#if defined(NV_VM_INSERT_PAGE_PRESENT) |
2335 |
if (NV_VM_INSERT_PAGE(vma, start, |
2336 |
NV_GET_PAGE_STRUCT(at->page_table[j]->phys_addr))) |
2337 |
#else |
2338 |
if (NV_REMAP_PAGE_RANGE(start, at->page_table[j]->phys_addr, |
2339 |
PAGE_SIZE, vma->vm_page_prot)) |
2340 |
#endif |
2341 |
{ |
2342 |
NV_ATOMIC_DEC(at->usage_count); |
2343 |
return -EAGAIN; |
2344 |
} |
2345 |
start += PAGE_SIZE; |
2346 |
} |
2347 |
|
2348 |
NV_PRINT_AT(NV_DBG_MEMINFO, at); |
2349 |
nv_vm_list_page_count(&at->page_table[i], pages); |
2350 |
|
2351 |
/* prevent the swapper from swapping it out */ |
2352 |
/* mark the memory i/o so the buffers aren't dumped on core dumps */ |
2353 |
vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED); |
2354 |
} |
2355 |
|
2356 |
NV_VMA_FILE(vma) = file; |
2357 |
|
2358 |
return 0; |
2359 |
} |
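/*
 * Illustrative sketch (compiled out): how a hypothetical user space
 * client would exercise this mmap() entry point. 'reg_offset' and
 * 'reg_size' are placeholders; real clients obtain the register BAR
 * geometry through the RM API rather than hardcoding it.
 */
#if 0
int fd = open("/dev/nvidia0", O_RDWR);           /* per-device node */
volatile void *regs = mmap(NULL, reg_size, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, reg_offset);
/* the VM_IO flag set above keeps such regions out of core dumps */
#endif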
2360 |
|
2361 |
unsigned int nv_kern_poll( |
2362 |
struct file *file, |
2363 |
poll_table *wait |
2364 |
) |
2365 |
{ |
2366 |
unsigned int mask = 0; |
2367 |
nv_file_private_t *nvfp; |
2368 |
nv_linux_state_t *nvl; |
2369 |
unsigned long eflags; |
2370 |
|
2371 |
nvl = NVL_FROM_FILEP(file); |
2372 |
|
2373 |
if (NV_IS_CONTROL_DEVICE(file->f_dentry->d_inode)) |
2374 |
return nv_kern_ctl_poll(file, wait); |
2375 |
|
2376 |
nvfp = NV_GET_NVFP(file); |
2377 |
|
2378 |
if ( !(file->f_flags & O_NONBLOCK)) |
2379 |
{ |
2380 |
// add us to the list |
2381 |
poll_wait(file, &nvfp->waitqueue, wait); |
2382 |
} |
2383 |
|
2384 |
NV_SPIN_LOCK_IRQSAVE(&nvfp->fp_lock, eflags); |
2385 |
|
2386 |
// wake the user on any event |
2387 |
if (nvfp->event_head != NULL) |
2388 |
{ |
2389 |
nv_printf(NV_DBG_EVENTINFO, "NVRM: Hey, an event occured!\n"); |
2390 |
// trigger the client, when they grab the event, |
2391 |
// we'll decrement the event count |
2392 |
mask |= (POLLPRI|POLLIN); |
2393 |
} |
2394 |
|
2395 |
NV_SPIN_UNLOCK_IRQRESTORE(&nvfp->fp_lock, eflags); |
2396 |
|
2397 |
return mask; |
2398 |
} |
2399 |
|
2400 |
#define NV_CTL_DEVICE_ONLY(nv) \ |
2401 |
{ \ |
2402 |
if (((nv)->flags & NV_FLAG_CONTROL) == 0) \ |
2403 |
{ \ |
2404 |
status = -EINVAL; \ |
2405 |
goto done; \ |
2406 |
} \ |
2407 |
} |
2408 |
|
2409 |
int nv_kern_ioctl( |
2410 |
struct inode *inode, |
2411 |
struct file *file, |
2412 |
unsigned int cmd, |
2413 |
unsigned long i_arg) |
2414 |
{ |
2415 |
RM_STATUS rmStatus; |
2416 |
int status = 0; |
2417 |
nv_linux_state_t *nvl; |
2418 |
nv_state_t *nv; |
2419 |
void *arg = (void *) i_arg; |
2420 |
void *arg_copy; |
2421 |
int arg_size; |
2422 |
|
2423 |
nvl = NVL_FROM_FILEP(file); |
2424 |
nv = NV_STATE_PTR(nvl); |
2425 |
|
2426 |
nv_printf(NV_DBG_INFO, "NVRM: ioctl(0x%x, 0x%x, 0x%x)\n", |
2427 |
_IOC_NR(cmd), (unsigned int) i_arg, _IOC_SIZE(cmd)); |
2428 |
|
2429 |
nv_verify_pci_config(nv); |
2430 |
|
2431 |
arg_size = _IOC_SIZE(cmd); |
2432 |
NV_KMALLOC(arg_copy, arg_size); |
2433 |
if (arg_copy == NULL) |
2434 |
{ |
2435 |
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate ioctl memory\n"); |
2436 |
return -ENOMEM; |
2437 |
} |
2438 |
|
2439 |
if (copy_from_user(arg_copy, arg, arg_size)) |
2440 |
{ |
2441 |
nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy in ioctl data\n"); |
2442 |
NV_KFREE(arg_copy, arg_size); |
2443 |
return -EFAULT;
2444 |
} |
2445 |
|
2446 |
switch (_IOC_NR(cmd)) |
2447 |
{ |
2448 |
/* pass out info about the card */ |
2449 |
case NV_ESC_CARD_INFO: |
2450 |
{ |
2451 |
nv_ioctl_card_info_t *ci; |
2452 |
nv_linux_state_t *tnvl; |
2453 |
nv_ioctl_rm_api_old_version_t *rm_api; |
2454 |
U032 i; |
2455 |
|
2456 |
NV_CTL_DEVICE_ONLY(nv); |
2457 |
|
2458 |
if (arg_size < (sizeof(*ci) * num_nv_devices)) |
2459 |
{ |
2460 |
status = -EINVAL; |
2461 |
goto done; |
2462 |
} |
2463 |
|
2464 |
/* the first element of card info passed from the client will have |
2465 |
* the rm_api_version_magic value to show that the client is new |
2466 |
* enough to support versioning. If the client is too old to |
2467 |
* support versioning, our mmap interfaces are probably different |
2468 |
* enough to cause serious damage. |
2469 |
* just copy in the one dword to check. |
2470 |
*/ |
2471 |
rm_api = arg_copy; |
2472 |
switch (rm_api->magic) |
2473 |
{ |
2474 |
case NV_RM_API_OLD_VERSION_MAGIC_REQ: |
2475 |
case NV_RM_API_OLD_VERSION_MAGIC_LAX_REQ: |
2476 |
case NV_RM_API_OLD_VERSION_MAGIC_OVERRIDE_REQ: |
2477 |
/* the client is using the old major-minor-patch |
2478 |
* API version check; reject it. |
2479 |
*/ |
2480 |
nv_printf(NV_DBG_ERRORS, |
2481 |
"NVRM: API mismatch: the client has the version %d.%d-%d, but\n" |
2482 |
"NVRM: this kernel module has the version %s. Please\n" |
2483 |
"NVRM: make sure that this kernel module and all NVIDIA driver\n" |
2484 |
"NVRM: components have the same version.\n", |
2485 |
rm_api->major, rm_api->minor, rm_api->patch, |
2486 |
NV_VERSION_STRING); |
2487 |
status = -EINVAL; |
2488 |
goto done; |
2489 |
|
2490 |
case NV_RM_API_OLD_VERSION_MAGIC_IGNORE: |
2491 |
/* the client is telling us to ignore the old |
2492 |
* version scheme; it will do a version check via |
2493 |
* NV_ESC_CHECK_VERSION_STR |
2494 |
*/ |
2495 |
break; |
2496 |
default: |
2497 |
nv_printf(NV_DBG_ERRORS, |
2498 |
"NVRM: client does not support versioning!!\n"); |
2499 |
status = -EINVAL; |
2500 |
goto done; |
2501 |
} |
2502 |
|
2503 |
ci = arg_copy; |
2504 |
memset(ci, 0, arg_size); |
2505 |
for (i = 0; i < num_nv_devices; i++) |
2506 |
{ |
2507 |
nv_state_t *tnv; |
2508 |
tnvl = &nv_linux_devices[i]; |
2509 |
tnv = NV_STATE_PTR(tnvl); |
2510 |
if (tnv->device_id) |
2511 |
{ |
2512 |
ci->flags = NV_IOCTL_CARD_INFO_FLAG_PRESENT; |
2513 |
ci->bus = tnv->bus; |
2514 |
ci->slot = tnv->slot; |
2515 |
ci->vendor_id = tnv->vendor_id; |
2516 |
ci->device_id = tnv->device_id; |
2517 |
ci->interrupt_line = tnv->interrupt_line; |
2518 |
ci->reg_address = tnv->regs->address; |
2519 |
ci->reg_size = tnv->regs->size; |
2520 |
ci->fb_address = tnv->fb->address; |
2521 |
ci->fb_size = tnv->fb->size; |
2522 |
ci++; |
2523 |
} |
2524 |
} |
2525 |
break; |
2526 |
} |
2527 |
|
2528 |
/* set a card to be posted */ |
2529 |
case NV_ESC_POST_VBIOS: |
2530 |
{ |
2531 |
NV_CTL_DEVICE_ONLY(nv); |
2532 |
|
2533 |
status = nvos_post_vbios(arg_copy, arg_size); |
2534 |
break; |
2535 |
} |
2536 |
|
2537 |
case NV_ESC_CHECK_VERSION_STR: |
2538 |
{ |
2539 |
NV_CTL_DEVICE_ONLY(nv); |
2540 |
|
2541 |
rmStatus = rm_perform_version_check(arg_copy, arg_size); |
2542 |
status = ((rmStatus == RM_OK) ? 0 : -EINVAL); |
2543 |
break; |
2544 |
} |
2545 |
|
2546 |
default: |
2547 |
rmStatus = rm_ioctl(nv, file, _IOC_NR(cmd), arg_copy, arg_size); |
2548 |
status = ((rmStatus == RM_OK) ? 0 : -EINVAL); |
2549 |
break; |
2550 |
} |
2551 |
|
2552 |
done: |
2553 |
if (copy_to_user(arg, arg_copy, arg_size)) |
2554 |
{ |
2555 |
nv_printf(NV_DBG_ERRORS, "NVRM: failed to copyout ioctl data\n"); |
2556 |
status = -EFAULT; |
2557 |
} |
2558 |
NV_KFREE(arg_copy, arg_size); |
2559 |
return status; |
2560 |
} |
2561 |
|
2562 |
long nv_kern_unlocked_ioctl( |
2563 |
struct file *file, |
2564 |
unsigned int cmd, |
2565 |
unsigned long i_arg |
2566 |
) |
2567 |
{ |
2568 |
return nv_kern_ioctl(file->f_dentry->d_inode, file, cmd, i_arg); |
2569 |
} |
2570 |
|
2571 |
long nv_kern_compat_ioctl( |
2572 |
struct file *file, |
2573 |
unsigned int cmd, |
2574 |
unsigned long i_arg |
2575 |
) |
2576 |
{ |
2577 |
return nv_kern_ioctl(file->f_dentry->d_inode, file, cmd, i_arg); |
2578 |
} |
2579 |
|
2580 |
/* |
2581 |
* driver receives an interrupt |
2582 |
* if someone waiting, then hand it off. |
2583 |
*/ |
2584 |
irqreturn_t nv_kern_isr( |
2585 |
int irq, |
2586 |
void *arg |
2587 |
#if !defined(NV_IRQ_HANDLER_T_PRESENT) || (NV_IRQ_HANDLER_T_ARGUMENT_COUNT == 3) |
2588 |
,struct pt_regs *regs |
2589 |
#endif |
2590 |
) |
2591 |
{ |
2592 |
nv_linux_state_t *nvl = (void *) arg; |
2593 |
nv_state_t *nv = NV_STATE_PTR(nvl); |
2594 |
U032 need_to_run_bottom_half = 0; |
2595 |
BOOL ret; |
2596 |
|
2597 |
nv_verify_pci_config(nv); |
2598 |
|
2599 |
ret = rm_isr(nv, &need_to_run_bottom_half); |
2600 |
if (need_to_run_bottom_half) |
2601 |
{ |
2602 |
tasklet_schedule(&nvl->tasklet); |
2603 |
} |
2604 |
|
2605 |
return IRQ_RETVAL(ret); |
2606 |
} |
2607 |
|
2608 |
void nv_kern_isr_bh( |
2609 |
unsigned long data |
2610 |
) |
2611 |
{ |
2612 |
nv_state_t *nv = (nv_state_t *) data; |
2613 |
/* |
2614 |
* XXX: This level of indirection is necessary to work around |
2615 |
* problems with Linux kernels using a non-standard calling |
2616 |
* convention, i.e. Arjan van de Ven's/RedHat's 2.6.0 kernels. |
2617 |
*/ |
2618 |
nv_verify_pci_config(nv); |
2619 |
rm_isr_bh(nv); |
2620 |
} |
2621 |
|
2622 |
void nv_kern_rc_timer( |
2623 |
unsigned long data |
2624 |
) |
2625 |
{ |
2626 |
nv_linux_state_t *nvl = (nv_linux_state_t *) data; |
2627 |
nv_state_t *nv = NV_STATE_PTR(nvl); |
2628 |
|
2629 |
// nv_printf(NV_DBG_INFO, "NVRM: rc timer\n"); |
2630 |
|
2631 |
nv_verify_pci_config(nv); |
2632 |
rm_run_rc_callback(nv); |
2633 |
mod_timer(&nvl->rc_timer, jiffies + HZ); /* set another timeout in 1 second */ |
2634 |
} |
2635 |
|
2636 |
#if defined(NV_PM_SUPPORT_OLD_STYLE_APM) |
2637 |
/* kernel calls us with a power management event */ |
2638 |
static int |
2639 |
nv_kern_apm_event( |
2640 |
struct pm_dev *dev, |
2641 |
pm_request_t rqst, |
2642 |
void *data |
2643 |
) |
2644 |
{ |
2645 |
nv_state_t *nv; |
2646 |
nv_linux_state_t *lnv; |
2647 |
U032 devnum; |
2648 |
int status = RM_OK; |
2649 |
|
2650 |
nv_printf(NV_DBG_INFO, "NVRM: nv_kern_apm_event: %d (0x%p)\n", rqst, data); |
2651 |
|
2652 |
for (devnum = 0; devnum < num_nv_devices; devnum++) |
2653 |
{ |
2654 |
if (apm_nv_dev[devnum] == dev) |
2655 |
{ |
2656 |
break; |
2657 |
} |
2658 |
} |
2659 |
|
2660 |
if (devnum == num_nv_devices) |
2661 |
{ |
2662 |
nv_printf(NV_DBG_WARNINGS, "NVRM: APM: invalid device!\n"); |
2663 |
return 1; |
2664 |
} |
2665 |
|
2666 |
lnv = &nv_linux_devices[devnum]; |
2667 |
nv = NV_STATE_PTR(lnv); |
2668 |
|
2669 |
nv_verify_pci_config(nv); |
2670 |
|
2671 |
switch (rqst) |
2672 |
{ |
2673 |
case PM_RESUME: |
2674 |
nv_printf(NV_DBG_INFO, "NVRM: APM: received resume event\n"); |
2675 |
__nv_enable_pat_support(); |
2676 |
status = rm_power_management(nv, 0, NV_PM_APM_RESUME); |
2677 |
break; |
2678 |
|
2679 |
case PM_SUSPEND: |
2680 |
nv_printf(NV_DBG_INFO, "NVRM: APM: received suspend event\n"); |
2681 |
status = rm_power_management(nv, 0, NV_PM_APM_SUSPEND); |
2682 |
__nv_disable_pat_support(); |
2683 |
break; |
2684 |
|
2685 |
// 2.4 kernels sent a PM_SAVE_STATE request when powering down via |
2686 |
// ACPI. just ignore it and return success so the power down works |
2687 |
case PM_SAVE_STATE: |
2688 |
status = RM_OK; |
2689 |
break; |
2690 |
|
2691 |
default: |
2692 |
nv_printf(NV_DBG_WARNINGS, "NVRM: APM: unsupported event: %d\n", rqst); |
2693 |
return 1; |
2694 |
} |
2695 |
|
2696 |
if (status != RM_OK) |
2697 |
nv_printf(NV_DBG_ERRORS, "NVRM: APM: failed event: %d\n", rqst); |
2698 |
|
2699 |
return status; |
2700 |
} |
2701 |
#endif /* defined(NV_PM_SUPPORT_OLD_STYLE_APM) */ |
2702 |
|
2703 |
/* |
2704 |
** nv_kern_ctl_open |
2705 |
** |
2706 |
** nv control driver open entry point. Sessions are created here. |
2707 |
*/ |
2708 |
int nv_kern_ctl_open( |
2709 |
struct inode *inode, |
2710 |
struct file *file |
2711 |
) |
2712 |
{ |
2713 |
nv_state_t *nv; |
2714 |
nv_linux_state_t *nvl; |
2715 |
int rc = 0; |
2716 |
|
2717 |
nvl = &nv_ctl_device; |
2718 |
nv = (nv_state_t *) nvl; |
2719 |
|
2720 |
nv_printf(NV_DBG_INFO, "NVRM: nv_kern_ctl_open\n"); |
2721 |
|
2722 |
down(&nvl->ldata_lock); |
2723 |
|
2724 |
/* save the nv away in file->private_data */ |
2725 |
NVL_FROM_FILEP(file) = nvl; |
2726 |
|
2727 |
if (NV_ATOMIC_READ(nvl->usage_count) == 0) |
2728 |
{ |
2729 |
init_waitqueue_head(&nv_ctl_waitqueue); |
2730 |
} |
2731 |
|
2732 |
nv->flags |= (NV_FLAG_OPEN | NV_FLAG_CONTROL);
2733 |
|
2734 |
/* turn off the hotkey occurred bit */ |
2735 |
nv->flags &= ~NV_FLAG_HOTKEY_OCCURRED; |
2736 |
|
2737 |
NV_ATOMIC_INC(nvl->usage_count); |
2738 |
up(&nvl->ldata_lock); |
2739 |
|
2740 |
return rc; |
2741 |
} |
2742 |
|
2743 |
|
2744 |
/* |
2745 |
** nv_kern_ctl_close |
2746 |
*/ |
2747 |
int nv_kern_ctl_close( |
2748 |
struct inode *inode, |
2749 |
struct file *file |
2750 |
) |
2751 |
{ |
2752 |
nv_linux_state_t *nvl = NVL_FROM_FILEP(file); |
2753 |
nv_state_t *nv = NV_STATE_PTR(nvl); |
2754 |
|
2755 |
nv_printf(NV_DBG_INFO, "NVRM: nv_kern_ctl_close\n"); |
2756 |
|
2757 |
down(&nvl->ldata_lock); |
2758 |
if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count)) |
2759 |
{ |
2760 |
nv->flags &= ~(NV_FLAG_OPEN | NV_FLAG_HOTKEY_OCCURRED); |
2761 |
} |
2762 |
up(&nvl->ldata_lock); |
2763 |
|
2764 |
rm_free_unused_clients(nv, (void *)file); |
2765 |
|
2766 |
if (FILE_PRIVATE(file)) |
2767 |
{ |
2768 |
nv_free_file_private(FILE_PRIVATE(file)); |
2769 |
FILE_PRIVATE(file) = NULL; |
2770 |
} |
2771 |
|
2772 |
return 0; |
2773 |
} |
2774 |
|
2775 |
|
2776 |
/* |
2777 |
* nv_kern_ctl_poll() - add the process to the wait queue |
2778 |
*/ |
2779 |
|
2780 |
unsigned int nv_kern_ctl_poll( |
2781 |
struct file *file, |
2782 |
poll_table *wait |
2783 |
) |
2784 |
{ |
2785 |
nv_linux_state_t *nvl; |
2786 |
nv_state_t *nv; |
2787 |
unsigned int ret = 0; |
2788 |
|
2789 |
nvl = NVL_FROM_FILEP(file); |
2790 |
nv = NV_STATE_PTR(nvl); |
2791 |
|
2792 |
if ( !(file->f_flags & O_NONBLOCK) ) |
2793 |
{ |
2794 |
poll_wait(file, &nv_ctl_waitqueue, wait); |
2795 |
} |
2796 |
|
2797 |
nv_lock_rm(nv); |
2798 |
if (nv->flags & NV_FLAG_HOTKEY_OCCURRED) |
2799 |
{ |
2800 |
nv_printf(NV_DBG_EVENTINFO, "NVRM: a hotkey event has occurred\n"); |
2801 |
nv->flags &= ~NV_FLAG_HOTKEY_OCCURRED; |
2802 |
ret = POLLIN | POLLRDNORM; |
2803 |
} |
2804 |
nv_unlock_rm(nv); |
2805 |
|
2806 |
return ret; |
2807 |
} |
2808 |
|
2809 |
|
2810 |
|
2811 |
|
2812 |
/* |
2813 |
* nv_set_hotkey_occurred_flag() - set the hotkey flag and wake up anybody |
2814 |
* waiting on the wait queue |
2815 |
*/ |
2816 |
|
2817 |
void NV_API_CALL nv_set_hotkey_occurred_flag(void) |
2818 |
{ |
2819 |
nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device); |
2820 |
|
2821 |
nv_printf(NV_DBG_EVENTINFO, "NVRM: setting the hotkey occurred flag!\n"); |
2822 |
|
2823 |
nv_lock_rm(nv); |
2824 |
nv_ctl_device.nv_state.flags |= NV_FLAG_HOTKEY_OCCURRED; |
2825 |
nv_unlock_rm(nv); |
2826 |
|
2827 |
wake_up_interruptible(&nv_ctl_waitqueue); |
2828 |
} |
2829 |
|
2830 |
void NV_API_CALL nv_set_dma_address_size( |
2831 |
nv_state_t *nv, |
2832 |
U032 phys_addr_bits |
2833 |
) |
2834 |
{ |
2835 |
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); |
2836 |
|
2837 |
#ifdef NV_SWIOTLB |
2838 |
if (swiotlb && !nv_swiotlb && (phys_addr_bits <= 32))
2839 |
{ |
2840 |
nv_prints(NV_DBG_ERRORS, __swiotlb_warning); |
2841 |
nvos_proc_add_text_file(proc_nvidia_warnings, "swiotlb", |
2842 |
__swiotlb_warning); |
2843 |
nv_swiotlb = 1; |
2844 |
} |
2845 |
#endif |
2846 |
|
2847 |
nvl->dev->dma_mask = (((u64)1) << phys_addr_bits) - 1; |
2848 |
} |
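/*
 * Worked example: for phys_addr_bits == 32 the assignment above yields
 * (((u64)1) << 32) - 1 == 0x00000000ffffffff, limiting the device to
 * DMA below 4 GB; phys_addr_bits == 40 yields 0xffffffffff (1 TB).
 */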
2849 |
|
2850 |
static int |
2851 |
nv_kern_read_cardinfo(char *page, char **start, off_t off, |
2852 |
int count, int *eof, void *data) |
2853 |
{ |
2854 |
struct pci_dev *dev; |
2855 |
char *type, *fmt, tmpstr[NV_DEVICE_NAME_LENGTH]; |
2856 |
int len = 0, status; |
2857 |
U032 vbios_rev1, vbios_rev2, vbios_rev3, vbios_rev4, vbios_rev5; |
2858 |
|
2859 |
nv_state_t *nv; |
2860 |
nv = (nv_state_t *) data; |
2861 |
*eof = 1; |
2862 |
|
2863 |
dev = nv_get_pci_device(nv); |
2864 |
if (!dev) |
2865 |
return 0; |
2866 |
|
2867 |
if (rm_get_device_name(nv, dev->device, NV_DEVICE_NAME_LENGTH, |
2868 |
tmpstr) != RM_OK) { |
2869 |
strcpy (tmpstr, "Unknown"); |
2870 |
} |
2871 |
|
2872 |
len += sprintf(page+len, "Model: \t\t %s\n", tmpstr); |
2873 |
len += sprintf(page+len, "IRQ: \t\t %d\n", nv->interrupt_line); |
2874 |
|
2875 |
status = rm_get_vbios_version(nv, &vbios_rev1, &vbios_rev2, |
2876 |
&vbios_rev3, &vbios_rev4, &vbios_rev5); |
2877 |
|
2878 |
if (status < 0) { |
2879 |
/* before rm_init_adapter */ |
2880 |
len += sprintf(page+len, "Video BIOS: \t ??.??.??.??.??\n"); |
2881 |
} else { |
2882 |
fmt = "Video BIOS: \t %02x.%02x.%02x.%02x.%02x\n"; |
2883 |
len += sprintf(page+len, fmt, vbios_rev1, vbios_rev2, vbios_rev3, |
2884 |
vbios_rev4, vbios_rev5); |
2885 |
} |
2886 |
|
2887 |
if (nvos_find_agp_capability(dev)) type = "AGP"; |
2888 |
else if (nvos_find_pci_express_capability(dev)) type = "PCI-E"; |
2889 |
else type = "PCI"; |
2890 |
len += sprintf(page+len, "Card Type: \t %s\n", type); |
2891 |
|
2892 |
// Report the number of bits set in dev->dma_mask |
2893 |
len += sprintf(page+len, "DMA Size: \t %d bits\n", |
2894 |
nv_count_bits(dev->dma_mask)); |
2895 |
len += sprintf(page+len, "DMA Mask: \t 0x%llx\n", dev->dma_mask); |
2896 |
|
2897 |
NV_PCI_DEV_PUT(dev); |
2898 |
return len; |
2899 |
} |
2900 |
|
2901 |
static int |
2902 |
nv_kern_read_version(char *page, char **start, off_t off, |
2903 |
int count, int *eof, void *data) |
2904 |
{ |
2905 |
int len = 0; |
2906 |
*eof = 1; |
2907 |
|
2908 |
len += sprintf(page+len, "NVRM version: %s\n", pNVRM_ID); |
2909 |
len += sprintf(page+len, "GCC version: %s\n", NV_COMPILER); |
2910 |
|
2911 |
return len; |
2912 |
} |
2913 |
|
2914 |
static int |
2915 |
nv_kern_read_agpinfo(char *page, char **start, off_t off, |
2916 |
int count, int *eof, void *data) |
2917 |
{ |
2918 |
struct pci_dev *dev; |
2919 |
char *fw, *sba; |
2920 |
u8 cap_ptr; |
2921 |
u32 status, command, agp_rate; |
2922 |
int len = 0; |
2923 |
|
2924 |
nv_state_t *nv; |
2925 |
nv = (nv_state_t *) data; |
2926 |
*eof = 1; |
2927 |
|
2928 |
if (nv) { |
2929 |
dev = nv_get_pci_device(nv); |
2930 |
if (!dev) |
2931 |
return 0; |
2932 |
} else { |
2933 |
dev = nvos_get_agp_device_by_class(PCI_CLASS_BRIDGE_HOST); |
2934 |
if (!dev) |
2935 |
return 0; |
2936 |
|
2937 |
len += sprintf(page+len, "Host Bridge: \t "); |
2938 |
|
2939 |
#if defined(CONFIG_PCI_NAMES) |
2940 |
len += sprintf(page+len, "%s\n", NV_PCI_DEVICE_NAME(dev)); |
2941 |
#else |
2942 |
len += sprintf(page+len, "PCI device %04x:%04x\n", |
2943 |
dev->vendor, dev->device); |
2944 |
#endif |
2945 |
} |
2946 |
|
2947 |
/* what can this AGP device do? */ |
2948 |
cap_ptr = nvos_find_agp_capability(dev); |
2949 |
|
2950 |
pci_read_config_dword(dev, cap_ptr + 4, &status); |
2951 |
pci_read_config_dword(dev, cap_ptr + 8, &command); |
2952 |
|
2953 |
fw = (status & 0x00000010) ? "Supported" : "Not Supported"; |
2954 |
sba = (status & 0x00000200) ? "Supported" : "Not Supported"; |
2955 |
|
2956 |
len += sprintf(page+len, "Fast Writes: \t %s\n", fw); |
2957 |
len += sprintf(page+len, "SBA: \t\t %s\n", sba); |
2958 |
|
2959 |
agp_rate = status & 0x7; |
2960 |
if (status & 0x8) // agp 3.0 |
2961 |
agp_rate <<= 2; |
2962 |
|
2963 |
len += sprintf(page+len, "AGP Rates: \t %s%s%s%s\n", |
2964 |
(agp_rate & 0x00000008) ? "8x " : "", |
2965 |
(agp_rate & 0x00000004) ? "4x " : "", |
2966 |
(agp_rate & 0x00000002) ? "2x " : "", |
2967 |
(agp_rate & 0x00000001) ? "1x " : ""); |
2968 |
|
2969 |
len += sprintf(page+len, "Registers: \t 0x%08x:0x%08x\n", status, command); |
2970 |
|
2971 |
NV_PCI_DEV_PUT(dev); |
2972 |
return len; |
2973 |
} |
2974 |
|
2975 |
static int |
2976 |
nv_kern_read_status(char *page, char **start, off_t off, |
2977 |
int count, int *eof, void *data) |
2978 |
{ |
2979 |
struct pci_dev *dev; |
2980 |
char *fw, *sba, *drv; |
2981 |
int len = 0; |
2982 |
u8 cap_ptr; |
2983 |
u32 scratch; |
2984 |
u32 status, command, agp_rate; |
2985 |
|
2986 |
nv_state_t *nv; |
2987 |
nv = (nv_state_t *) data; |
2988 |
*eof = 1; |
2989 |
|
2990 |
dev = nvos_get_agp_device_by_class(PCI_CLASS_BRIDGE_HOST); |
2991 |
if (!dev) |
2992 |
return 0; |
2993 |
cap_ptr = nvos_find_agp_capability(dev); |
2994 |
|
2995 |
pci_read_config_dword(dev, cap_ptr + 4, &status); |
2996 |
pci_read_config_dword(dev, cap_ptr + 8, &command); |
2997 |
NV_PCI_DEV_PUT(dev); |
2998 |
|
2999 |
dev = nvos_get_agp_device_by_class(PCI_CLASS_DISPLAY_VGA); |
3000 |
if (!dev) |
3001 |
return 0; |
3002 |
cap_ptr = nvos_find_agp_capability(dev); |
3003 |
|
3004 |
pci_read_config_dword(dev, cap_ptr + 4, &scratch); |
3005 |
status &= scratch; |
3006 |
pci_read_config_dword(dev, cap_ptr + 8, &scratch); |
3007 |
command &= scratch; |
3008 |
|
3009 |
if (NV_AGP_ENABLED(nv) && (command & 0x100)) { |
3010 |
len += sprintf(page+len, "Status: \t Enabled\n"); |
3011 |
|
3012 |
drv = NV_OSAGP_ENABLED(nv) ? "AGPGART" : "NVIDIA"; |
3013 |
len += sprintf(page+len, "Driver: \t %s\n", drv); |
3014 |
|
3015 |
// mask off agp rate. |
3016 |
// If this is agp 3.0, we need to shift the value |
3017 |
agp_rate = command & 0x7; |
3018 |
if (status & 0x8) // agp 3.0 |
3019 |
agp_rate <<= 2; |
3020 |
|
3021 |
len += sprintf(page+len, "AGP Rate: \t %dx\n", agp_rate); |
3022 |
|
3023 |
fw = (command & 0x00000010) ? "Enabled" : "Disabled"; |
3024 |
len += sprintf(page+len, "Fast Writes: \t %s\n", fw); |
3025 |
|
3026 |
sba = (command & 0x00000200) ? "Enabled" : "Disabled"; |
3027 |
len += sprintf(page+len, "SBA: \t\t %s\n", sba); |
3028 |
} else { |
3029 |
int agp_config = 0; |
3030 |
|
3031 |
len += sprintf(page+len, "Status: \t Disabled\n\n"); |
3032 |
|
3033 |
/* |
3034 |
* If we find AGP is disabled, but the RM registry indicates it |
3035 |
* was requested, direct the user to the kernel log (we, or even |
3036 |
* the kernel may have printed a warning/an error message). |
3037 |
* |
3038 |
* Note that the "XNvAGP" registry key reflects the user request |
3039 |
* and overrides the RM "NvAGP" key, if present. |
3040 |
*/ |
3041 |
rm_read_registry_dword(nv, "NVreg", "NvAGP", &agp_config); |
3042 |
rm_read_registry_dword(nv, "NVreg", "XNvAGP", &agp_config); |
3043 |
|
3044 |
if (agp_config != NVOS_AGP_CONFIG_DISABLE_AGP && NV_AGP_FAILED(nv)) { |
3045 |
len += sprintf(page+len, |
3046 |
"AGP initialization failed, please check the ouput \n" |
3047 |
"of the 'dmesg' command and/or your system log file \n" |
3048 |
"for additional information on this problem. \n"); |
3049 |
} |
3050 |
} |
3051 |
|
3052 |
NV_PCI_DEV_PUT(dev); |
3053 |
return len; |
3054 |
} |
3055 |
|
3056 |
extern nv_parm_t nv_parms[]; |
3057 |
extern char *NVreg_RegistryDwords; |
3058 |
|
3059 |
static int |
3060 |
nv_kern_read_registry(char *page, char **start, off_t off, |
3061 |
int count, int *eof, void *data) |
3062 |
{ |
3063 |
unsigned int i, len = 0; |
3064 |
nv_parm_t *entry; |
3065 |
*eof = 1; |
3066 |
|
3067 |
for (i = 0; (entry = &nv_parms[i])->name != NULL; i++) |
3068 |
len += sprintf(page+len, "%s: %u\n", entry->name, *entry->data); |
3069 |
|
3070 |
len += sprintf(page+len, "RegistryDwords: \"%s\"\n", |
3071 |
(NVreg_RegistryDwords != NULL) ? NVreg_RegistryDwords : ""); |
3072 |
|
3073 |
return len; |
3074 |
} |
3075 |
|
3076 |
static int |
3077 |
nv_kern_read_text_file(char *page, char **start, off_t off, |
3078 |
int count, int *eof, void *data) |
3079 |
{ |
3080 |
*eof = 1; |
3081 |
return sprintf(page, "%s", (char *)data); |
3082 |
} |
3083 |
|
3084 |
/*** |
3085 |
*** EXPORTS to rest of resman |
3086 |
***/ |
3087 |
|
3088 |
/* |
3089 |
* Given a physical address, find the associated 'at', track down |
3090 |
* the actual page within the allocation and return a kernel virtual |
3091 |
* mapping to it. Make sure to save the page offset if the address |
3092 |
* isn't aligned. |
3093 |
* |
3094 |
* If the requested mapping spans more than one page, then determine |
3095 |
* the individual pages and create a mapping with vmap(). |
3096 |
*/ |
3097 |
void* NV_API_CALL nv_alloc_kernel_mapping( |
3098 |
nv_state_t *nv, |
3099 |
NvU64 address, |
3100 |
U032 size, |
3101 |
void **priv_data |
3102 |
) |
3103 |
{ |
3104 |
nv_alloc_t *at; |
3105 |
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); |
3106 |
U032 i, offset; |
3107 |
|
3108 |
down(&nvl->at_lock); |
3109 |
at = nvl_find_alloc(nvl, address, NV_ALLOC_TYPE_PCI); |
3110 |
if (at != NULL) |
3111 |
{ |
3112 |
offset = address & ~PAGE_MASK; |
3113 |
address &= PAGE_MASK; |
3114 |
|
3115 |
for (i = 0; i < at->num_pages; i++) |
3116 |
{ |
3117 |
if ((address == at->page_table[i]->phys_addr) |
3118 |
|| (address == at->page_table[i]->dma_addr)) |
3119 |
break; |
3120 |
} |
3121 |
|
3122 |
if (i == at->num_pages) /* not found */ |
3123 |
{ |
3124 |
up(&nvl->at_lock); |
3125 |
return NULL; |
3126 |
} |
3127 |
} |
3128 |
else |
3129 |
{ |
3130 |
at = nvl_find_alloc(nvl, address, NV_ALLOC_TYPE_AGP); |
3131 |
if (at != NULL) |
3132 |
{ |
3133 |
offset = address - (unsigned long) at->key_mapping; |
3134 |
i = offset >> PAGE_SHIFT; |
3135 |
offset = address & ~PAGE_MASK; |
3136 |
|
3137 |
if (at->page_table[i]->virt_addr == 0) |
3138 |
{ |
3139 |
up(&nvl->at_lock); |
3140 |
return NULL; |
3141 |
} |
3142 |
} |
3143 |
else |
3144 |
{ |
3145 |
up(&nvl->at_lock); |
3146 |
return NULL; /* not found */ |
3147 |
} |
3148 |
} |
3149 |
up(&nvl->at_lock); |
3150 |
|
3151 |
if ((size + offset) <= PAGE_SIZE) |
3152 |
{ |
3153 |
*priv_data = NULL; |
3154 |
return (void *)(at->page_table[i]->virt_addr + offset); |
3155 |
} |
3156 |
else |
3157 |
{ |
3158 |
#if defined(NV_VMAP_PRESENT) |
3159 |
U032 j, page_count; |
3160 |
unsigned long virt_addr; |
3161 |
struct page **pages; |
3162 |
|
3163 |
size += offset; /* adjust mapping size */ |
3164 |
page_count = (size >> PAGE_SHIFT) + ((size & ~PAGE_MASK) ? 1 : 0); |
3165 |
|
3166 |
if ((i + page_count) > at->num_pages) |
3167 |
{ |
3168 |
nv_printf(NV_DBG_ERRORS, |
3169 |
"NVRM: requested mapping exceeds allocation's boundary!\n"); |
3170 |
return NULL; |
3171 |
} |
3172 |
|
3173 |
NV_KMALLOC(pages, sizeof(struct page *) * page_count); |
3174 |
if (pages == NULL) |
3175 |
{ |
3176 |
nv_printf(NV_DBG_ERRORS, |
3177 |
"NVRM: failed to allocate vmap() page descriptor table!\n"); |
3178 |
return NULL; |
3179 |
} |
3180 |
|
3181 |
for (j = 0; j < page_count; j++) |
3182 |
pages[j] = NV_GET_PAGE_STRUCT(at->page_table[i+j]->phys_addr); |
3183 |
|
3184 |
NV_VMAP(virt_addr, pages, page_count, NV_ALLOC_MAPPING_CACHED(at->flags)); |
3185 |
NV_KFREE(pages, sizeof(struct page *) * page_count); |
3186 |
if (virt_addr == 0) |
3187 |
{ |
3188 |
nv_printf(NV_DBG_ERRORS, "NVRM: vmap() failed to map pages!\n"); |
3189 |
return NULL; |
3190 |
} |
3191 |
|
3192 |
*priv_data = (void *)(unsigned long)page_count; |
3193 |
return (void *)(virt_addr + offset); |
3194 |
#else |
3195 |
nv_printf(NV_DBG_ERRORS, |
3196 |
"NVRM: This version of the Linux kernel does not provide the vmap()\n" |
3197 |
"NVRM: kernel interface. If you see this message, please update\n" |
3198 |
"NVRM: your kernel to Linux 2.4.22 or install a distribution kernel\n" |
3199 |
"NVRM: that supports the vmap() kernel interface.\n"); |
3200 |
#endif |
3201 |
} |
3202 |
|
3203 |
return NULL; |
3204 |
} |
3205 |
|
3206 |
int NV_API_CALL nv_free_kernel_mapping( |
3207 |
nv_state_t *nv, |
3208 |
void *address, |
3209 |
void *priv_data |
3210 |
) |
3211 |
{ |
3212 |
#if defined(NV_VMAP_PRESENT) |
3213 |
unsigned long virt_addr; |
3214 |
U032 page_count; |
3215 |
|
3216 |
virt_addr = (unsigned long)address & PAGE_MASK; |
3217 |
|
3218 |
if (virt_addr >= VMALLOC_START && virt_addr < VMALLOC_END) |
3219 |
{ |
3220 |
page_count = (unsigned long)priv_data; |
3221 |
if (page_count == 0) |
3222 |
{ |
3223 |
nv_printf(NV_DBG_ERRORS, |
3224 |
"NVRM: nv_free_kernel_mapping(): invalid page count!\n"); |
3225 |
return RM_ERROR; |
3226 |
} |
3227 |
|
3228 |
NV_VUNMAP(virt_addr, page_count); |
3229 |
} |
3230 |
#endif |
3231 |
return RM_OK; |
3232 |
} |
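/*
 * Illustrative sketch (compiled out): the intended pairing of the two
 * routines above. 'priv' carries the vmap() page count from
 * nv_alloc_kernel_mapping() to nv_free_kernel_mapping().
 */
#if 0
void *priv, *ptr;
ptr = nv_alloc_kernel_mapping(nv, address, size, &priv);
if (ptr != NULL)
{
    /* ... access the allocation through 'ptr' ... */
    nv_free_kernel_mapping(nv, ptr, priv);
}
#endif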
3233 |
|
3234 |
|
3235 |
/* virtual address to physical page address */ |
3236 |
NvU64 nv_get_phys_address( |
3237 |
NvU64 address, |
3238 |
BOOL kern |
3239 |
) |
3240 |
{ |
3241 |
#if defined(NV_SET_PAGES_UC_PRESENT) |
3242 |
nv_printf(NV_DBG_ERRORS, |
3243 |
"NVRM: can't translate address in nv_get_phys_address()!\n"); |
3244 |
#else |
3245 |
struct mm_struct *mm; |
3246 |
pgd_t *pgd = NULL; |
3247 |
pmd_t *pmd = NULL; |
3248 |
pte_t *pte = NULL; |
3249 |
NvU64 retval; |
3250 |
|
3251 |
if (!kern) |
3252 |
{ |
3253 |
mm = current->mm; |
3254 |
down_read(&mm->mmap_sem); |
3255 |
} |
3256 |
else |
3257 |
mm = NULL; |
3258 |
|
3259 |
pgd = NV_PGD_OFFSET(address, kern, mm); |
3260 |
if (!NV_PGD_PRESENT(pgd)) |
3261 |
goto failed; |
3262 |
|
3263 |
pmd = NV_PMD_OFFSET(address, pgd); |
3264 |
if (!NV_PMD_PRESENT(pmd)) |
3265 |
goto failed; |
3266 |
|
3267 |
pte = NV_PTE_OFFSET(address, pmd); |
3268 |
if (!NV_PTE_PRESENT(pte)) |
3269 |
goto failed; |
3270 |
|
3271 |
retval = ((NV_PTE_VALUE(pte) & PAGE_MASK) | NV_MASK_OFFSET(address)); |
3272 |
|
3273 |
#if defined(NVCPU_X86_64) && defined(_PAGE_NX) |
3274 |
// mask out the non-executable page bit for the true physical address |
3275 |
retval &= ~_PAGE_NX; |
3276 |
#endif |
3277 |
|
3278 |
if (!kern) |
3279 |
up_read(&mm->mmap_sem); |
3280 |
return retval; |
3281 |
|
3282 |
failed: |
3283 |
if (!kern) |
3284 |
up_read(&mm->mmap_sem); |
3285 |
#endif |
3286 |
return 0; |
3287 |
} |
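/*
 * Note (sketch, compiled out): the walk above predates 4-level page
 * tables. On kernels that define pud_t (2.6.11 and later), the generic
 * equivalent inserts a pud level between pgd and pmd, roughly:
 */
#if 0
pgd_t *pgd = pgd_offset(mm, address);
pud_t *pud = pud_offset(pgd, address);  /* the additional level */
pmd_t *pmd = pmd_offset(pud, address);
pte_t *pte = pte_offset_map(pmd, address);
#endif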
3288 |
|
3289 |
NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address) |
3290 |
{ |
3291 |
/* make sure this address is a kernel virtual address */ |
3292 |
#if defined(DEBUG) && !defined(CONFIG_X86_4G) |
3293 |
if (address < PAGE_OFFSET) |
3294 |
{ |
3295 |
nv_printf(NV_DBG_WARNINGS, |
3296 |
"NVRM: user address passed to get_kern_phys_address: 0x%llx!\n", |
3297 |
address); |
3298 |
return 0; |
3299 |
} |
3300 |
#endif |
3301 |
|
3302 |
/* direct-mapped kernel address */ |
3303 |
if ((address > PAGE_OFFSET) && (address < VMALLOC_START)) |
3304 |
return __pa(address); |
3305 |
|
3306 |
return nv_get_phys_address(address, TRUE); |
3307 |
} |
3308 |
|
3309 |
NvU64 NV_API_CALL nv_get_kern_user_address(NvU64 address) |
3310 |
{ |
3311 |
/* make sure this address is not a kernel virtual address */ |
3312 |
#if defined(DEBUG) && !defined(CONFIG_X86_4G) |
3313 |
if (address >= PAGE_OFFSET) |
3314 |
{ |
3315 |
nv_printf(NV_DBG_WARNINGS, |
3316 |
"NVRM: kernel address passed to get_user_phys_address: 0x%llx!\n", |
3317 |
address); |
3318 |
return 0; |
3319 |
} |
3320 |
#endif |
3321 |
|
3322 |
return nv_get_phys_address(address, FALSE); |
3323 |
} |
3324 |
|
3325 |
|
3326 |
/* allocate memory for DMA push buffers */ |
3327 |
int NV_API_CALL nv_alloc_pages( |
3328 |
nv_state_t *nv, |
3329 |
U032 page_count, |
3330 |
U032 agp_memory, |
3331 |
U032 contiguous, |
3332 |
U032 cache_type, |
3333 |
NvU64 *pte_array, |
3334 |
void **priv_data |
3335 |
) |
3336 |
{ |
3337 |
nv_alloc_t *at; |
3338 |
RM_STATUS rm_status = 0; |
3339 |
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); |
3340 |
U032 i; |
3341 |
|
3342 |
nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_alloc_pages: %d pages\n", page_count); |
3343 |
nv_printf(NV_DBG_MEMINFO, "NVRM: VM: agp %d contig %d cache_type %d\n", |
3344 |
agp_memory, contiguous, cache_type); |
3345 |
|
3346 |
/* if we can't support this caching, bail before we do any work */ |
3347 |
if (nv_encode_caching(NULL, cache_type, |
3348 |
agp_memory ? NV_MEMORY_TYPE_AGP : NV_MEMORY_TYPE_SYSTEM)) |
3349 |
return RM_ERROR; |
3350 |
|
3351 |
page_count = RM_PAGES_TO_OS_PAGES(page_count); |
3352 |
at = nvos_create_alloc(nvl->dev, page_count); |
3353 |
if (at == NULL) |
3354 |
return RM_ERROR; |
3355 |
|
3356 |
at->flags = nv_alloc_init_flags(cache_type, agp_memory, contiguous); |
3357 |
at->nv = nv; |
3358 |
|
3359 |
if (agp_memory) |
3360 |
{ |
3361 |
U032 offset; |
3362 |
|
3363 |
if (!NV_AGP_ENABLED(nv)) |
3364 |
goto failed; |
3365 |
|
3366 |
/* allocate agp-able memory */ |
3367 |
if (NV_OSAGP_ENABLED(nv)) |
3368 |
{ |
3369 |
/* agpgart will allocate all of the underlying memory */ |
3370 |
rm_status = KernAllocAGPPages(nv, page_count, priv_data, &offset); |
3371 |
if (rm_status) |
3372 |
goto failed; |
3373 |
|
3374 |
KernLoadAGPPages(nv, at, *priv_data); |
3375 |
} else { |
3376 |
rm_status = rm_alloc_agp_pages(nv, page_count, priv_data, &offset); |
3377 |
if (rm_status) |
3378 |
goto failed; |
3379 |
} |
3380 |
|
3381 |
at->priv_data = *priv_data; |
3382 |
nvl_add_alloc(nvl, at); |
3383 |
|
3384 |
pte_array[0] = (nv->agp.address + (offset << PAGE_SHIFT)); |
3385 |
at->key_mapping = (void *)(NvUPtr)pte_array[0]; |
3386 |
} |
3387 |
else |
3388 |
{ |
3389 |
if (nv_vm_malloc_pages(nv, at)) |
3390 |
goto failed; |
3391 |
|
3392 |
/* |
3393 |
* The mapping must be page-aligned or mmap() will fail, so use the
* first page, which is page-aligned; this way, the allocated page
* table itself does not need to be page-aligned.
3396 |
*/ |
3397 |
for (i = 0; i < ((contiguous) ? 1 : page_count); i++) |
3398 |
pte_array[i] = at->page_table[i]->dma_addr; |
3399 |
|
3400 |
at->key_mapping = (void *)at->page_table[0]->phys_addr; |
3401 |
nvl_add_alloc(nvl, at); |
3402 |
} |
3403 |
|
3404 |
*priv_data = at; |
3405 |
NV_ATOMIC_INC(at->usage_count); |
3406 |
|
3407 |
NV_PRINT_AT(NV_DBG_MEMINFO, at); |
3408 |
|
3409 |
return RM_OK; |
3410 |
|
3411 |
failed: |
3412 |
nvos_free_alloc(at); |
3413 |
|
3414 |
return RM_ERROR;
3415 |
} |
3416 |
|
3417 |
int NV_API_CALL nv_free_pages( |
3418 |
nv_state_t *nv, |
3419 |
U032 page_count, |
3420 |
U032 agp_memory, |
3421 |
U032 contiguous, |
3422 |
U032 cache_type, |
3423 |
void *priv_data |
3424 |
) |
3425 |
{ |
3426 |
int rmStatus = 0; |
3427 |
nv_alloc_t *at = priv_data; |
3428 |
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); |
3429 |
|
3430 |
page_count = RM_PAGES_TO_OS_PAGES(page_count); |
3431 |
nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_free_pages: 0x%p 0x%x\n", |
3432 |
at->key_mapping, page_count); |
3433 |
|
3434 |
/* only hold at_lock while removing 'at' from the list */
3435 |
down(&nvl->at_lock); |
3436 |
|
3437 |
NV_PRINT_AT(NV_DBG_MEMINFO, at); |
3438 |
|
3439 |
/* |
3440 |
* If the 'at' usage count doesn't drop to zero here, not all of |
3441 |
* the user mappings have been torn down in time - we can't |
3442 |
* safely free the memory. We report success back to the RM, but |
3443 |
* defer the actual free until later. |
3444 |
* |
3445 |
* This is described in greater detail in the comments above the |
3446 |
* nv_kern_vma_(open|release)() callbacks above. |
3447 |
*/ |
3448 |
if (!NV_ATOMIC_DEC_AND_TEST(at->usage_count)) |
3449 |
{ |
3450 |
up(&nvl->at_lock); |
3451 |
return 0; |
3452 |
} |
3453 |
|
3454 |
nvl_remove_alloc(nvl, at); |
3455 |
up(&nvl->at_lock); |
3456 |
|
3457 |
if (agp_memory) |
3458 |
{ |
3459 |
if (!NV_AGP_ENABLED(nv)) |
3460 |
return -1; |
3461 |
|
3462 |
if (NV_OSAGP_ENABLED(nv)) |
3463 |
{ |
3464 |
rmStatus = KernFreeAGPPages(nv, at->priv_data); |
3465 |
} else { |
3466 |
rmStatus = rm_free_agp_pages(nv, at->priv_data); |
3467 |
} |
3468 |
} else |
3469 |
nv_vm_free_pages(nv, at); |
3470 |
|
3471 |
nvos_free_alloc(at); |
3472 |
|
3473 |
return rmStatus; |
3474 |
} |
3475 |
|
3476 |
NvU64 NV_API_CALL nv_dma_to_mmap_token( |
3477 |
nv_state_t *nv, |
3478 |
NvU64 address |
3479 |
) |
3480 |
{ |
3481 |
return address; |
3482 |
} |
3483 |
|
3484 |
static void nv_lock_init_locks |
3485 |
( |
3486 |
nv_state_t *nv |
3487 |
) |
3488 |
{ |
3489 |
nv_linux_state_t *nvl; |
3490 |
nvl = NV_GET_NVL_FROM_NV_STATE(nv); |
3491 |
|
3492 |
NV_SPIN_LOCK_INIT(&nvl->rm_lock); |
3493 |
|
3494 |
NV_INIT_MUTEX(&nvl->ldata_lock); |
3495 |
NV_INIT_MUTEX(&nvl->at_lock); |
3496 |
|
3497 |
NV_ATOMIC_SET(nvl->usage_count, 0); |
3498 |
|
3499 |
nvl->rm_lock_cpu = -1; |
3500 |
nvl->rm_lock_count = 0; |
3501 |
} |
3502 |
|
3503 |
void NV_API_CALL nv_lock_rm( |
3504 |
nv_state_t *nv |
3505 |
) |
3506 |
{ |
3507 |
nv_linux_state_t *nvl; |
3508 |
int cpu; |
3509 |
|
3510 |
nvl = NV_GET_NVL_FROM_NV_STATE(nv); |
3511 |
cpu = get_cpu(); |
3512 |
|
3513 |
if (nvl->rm_lock_cpu == cpu) |
3514 |
{ |
3515 |
nvl->rm_lock_count++; |
3516 |
put_cpu(); |
3517 |
return; |
3518 |
} |
3519 |
|
3520 |
put_cpu(); |
3521 |
NV_SPIN_UNLOCK_WAIT(&nvl->rm_lock); |
3522 |
NV_SPIN_LOCK_IRQ(&nvl->rm_lock); |
3523 |
|
3524 |
nvl->rm_lock_cpu = smp_processor_id(); |
3525 |
nvl->rm_lock_count = 1; |
3526 |
} |
3527 |
|
3528 |
void NV_API_CALL nv_unlock_rm( |
3529 |
nv_state_t *nv |
3530 |
) |
3531 |
{ |
3532 |
nv_linux_state_t *nvl; |
3533 |
nvl = NV_GET_NVL_FROM_NV_STATE(nv); |
3534 |
|
3535 |
if (--nvl->rm_lock_count) |
3536 |
return; |
3537 |
|
3538 |
nvl->rm_lock_cpu = -1; |
3539 |
NV_SPIN_UNLOCK_IRQ(&nvl->rm_lock); |
3540 |
} |
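/*
 * Illustrative sketch (compiled out): why the CPU/recursion counters
 * above matter. A path that re-enters the RM on the same CPU only
 * bumps rm_lock_count instead of spinning on a lock it already owns.
 */
#if 0
nv_lock_rm(nv);    /* takes the IRQ spinlock, rm_lock_count = 1 */
nv_lock_rm(nv);    /* same CPU: rm_lock_count = 2, no deadlock  */
nv_unlock_rm(nv);  /* rm_lock_count = 1, lock still held        */
nv_unlock_rm(nv);  /* rm_lock_count = 0, spinlock released      */
#endif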
3541 |
|
3542 |
/* |
3543 |
** post the event |
3544 |
*/ |
3545 |
|
3546 |
void NV_API_CALL nv_post_event( |
3547 |
nv_state_t *nv, |
3548 |
nv_event_t *event, |
3549 |
U032 handle, |
3550 |
U032 index |
3551 |
) |
3552 |
{ |
3553 |
struct file *file = (struct file *) event->file; |
3554 |
nv_file_private_t *nvfp = NV_GET_NVFP(file); |
3555 |
unsigned long eflags; |
3556 |
nvidia_event_t *nvet; |
3557 |
|
3558 |
nv_printf(NV_DBG_EVENTINFO, "NVRM: posting event on 0x%x:0x%x\n", |
3559 |
event, nvfp); |
3560 |
|
3561 |
NV_KMALLOC_ATOMIC(nvet, sizeof(nvidia_event_t)); |
3562 |
|
3563 |
if (nvet == NULL) |
3564 |
return; |
3565 |
|
3566 |
NV_SPIN_LOCK_IRQSAVE(&nvfp->fp_lock, eflags); |
3567 |
|
3568 |
// Insert the event struct in the queue |
3569 |
if (nvfp->event_tail != NULL) |
3570 |
nvfp->event_tail->next = nvet; |
3571 |
if (nvfp->event_head == NULL) |
3572 |
nvfp->event_head = nvet; |
3573 |
nvfp->event_tail = nvet; |
3574 |
nvet->next = NULL; |
3575 |
|
3576 |
// copy the event into the queue |
3577 |
nvet->event = *event; |
3578 |
|
3579 |
// set the handle for this event |
3580 |
nvet->event.hObject = handle; |
3581 |
nvet->event.index = index; |
3582 |
|
3583 |
wake_up_interruptible(&nvfp->waitqueue); |
3584 |
NV_SPIN_UNLOCK_IRQRESTORE(&nvfp->fp_lock, eflags); |
3585 |
} |
3586 |
|
3587 |
int NV_API_CALL nv_get_event( |
3588 |
nv_state_t *nv, |
3589 |
void *void_file, |
3590 |
nv_event_t *event, |
3591 |
U032 *more_events |
3592 |
) |
3593 |
{ |
3594 |
struct file *file = (struct file *) void_file; |
3595 |
nv_file_private_t *nvfp = NV_GET_NVFP(file); |
3596 |
nvidia_event_t *nvet; |
3597 |
unsigned long eflags; |
3598 |
|
3599 |
NV_SPIN_LOCK_IRQSAVE(&nvfp->fp_lock, eflags); |
3600 |
if (nvfp->event_head == NULL) |
3601 |
{ |
3602 |
NV_SPIN_UNLOCK_IRQRESTORE(&nvfp->fp_lock, eflags); |
3603 |
return -1; |
3604 |
} |
3605 |
|
3606 |
nvet = nvfp->event_head; |
3607 |
|
3608 |
*event = nvet->event; |
3609 |
|
3610 |
if (nvfp->event_tail == nvet) |
3611 |
nvfp->event_tail = NULL; |
3612 |
nvfp->event_head = nvet->next; |
3613 |
|
3614 |
NV_KFREE(nvet, sizeof(nvidia_event_t)); |
3615 |
|
3616 |
if (more_events) |
3617 |
*more_events = (nvfp->event_head != NULL); |
3618 |
|
3619 |
nv_printf(NV_DBG_EVENTINFO, "NVRM: returning event: 0x%x\n", event); |
3620 |
nv_printf(NV_DBG_EVENTINFO, "NVRM: hParent: 0x%x\n", event->hParent); |
3621 |
nv_printf(NV_DBG_EVENTINFO, "NVRM: hObject: 0x%x\n", event->hObject); |
3622 |
nv_printf(NV_DBG_EVENTINFO, "NVRM: file: 0x%p\n", event->file); |
3623 |
nv_printf(NV_DBG_EVENTINFO, "NVRM: fd: %d\n", event->fd); |
3624 |
if (more_events) |
3625 |
nv_printf(NV_DBG_EVENTINFO, "NVRM: more events: %d\n", *more_events); |
3626 |
|
3627 |
NV_SPIN_UNLOCK_IRQRESTORE(&nvfp->fp_lock, eflags); |
3628 |
|
3629 |
return 0; |
3630 |
} |
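/*
 * Illustrative sketch (compiled out): the producer/consumer contract
 * between the two routines above. nv_post_event() appends under
 * fp_lock and wakes pollers; a client woken via nv_kern_poll() then
 * drains the FIFO through the RM, which calls nv_get_event().
 */
#if 0
nv_event_t ev;
U032 more;
do
{
    if (nv_get_event(nv, file, &ev, &more) != 0)
        break; /* queue empty */
    /* ... dispatch 'ev' to the waiting client ... */
} while (more);
#endif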

int NV_API_CALL nv_agp_init(
    nv_state_t *nv,
    void **phys_start,
    void *agp_limit,
    U032 config /* passed in from XF86Config file */
)
{
    U032 status = 1;
    static int old_error = 0;

    if (NV_AGP_ENABLED(nv))
        return -1;

    if (config == NVOS_AGP_CONFIG_DISABLE_AGP)
    {
        nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
        nv->agp_status = NV_AGP_STATUS_DISABLED;
        return 0;
    }

    nv_printf(NV_DBG_SETUP, "NVRM: nv_agp_init\n");

    nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
    nv->agp_status = NV_AGP_STATUS_FAILED;

    if (config & NVOS_AGP_CONFIG_OSAGP)
    {
        status = KernInitAGP(nv, phys_start, agp_limit);

        /* if enabling agpgart was successful, register it,
         * and check for overrides
         */
        if (status == 0)
        {
            nv->agp_config = NVOS_AGP_CONFIG_OSAGP;
            nv->agp_status = NV_AGP_STATUS_ENABLED;

            /* make sure we apply our overrides in this case */
            rm_update_agp_config(nv);
        }

        if (status == 1 && !(config & NVOS_AGP_CONFIG_NVAGP) && !old_error)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: unable to initialize the Linux AGPGART driver; please\n"
                "NVRM: verify that your kernel is configured with support\n"
                "NVRM: for AGPGART (either statically linked or as a kernel\n"
                "NVRM: module), and that support for your AGP chipset is\n"
                "NVRM: enabled.\n");
#if !defined(KERNEL_2_4)
            nv_printf(NV_DBG_ERRORS,
                "NVRM:\n"
                "NVRM: note that as of Linux 2.6, the AGPGART chipset/vendor\n"
                "NVRM: drivers are split into independent modules; make sure\n"
                "NVRM: the correct one is loaded for your chipset.\n");
#endif
            old_error = 1;
        }

        /* if agpgart is loaded, but we failed to initialize it,
         * we'd better not attempt nvagp, or we're likely to lock
         * the machine.
         */
        if (status < 0)
            return status;
    }

    /* we're either explicitly not using agpgart,
     * or trying to use agpgart failed;
     * make sure the user did not specify "use agpgart only"
     */
    if (!NV_AGP_ENABLED(nv) && (config & NVOS_AGP_CONFIG_NVAGP))
    {
        /* make sure the user does not have agpgart loaded */
#if defined(KERNEL_2_4)
        if (inter_module_get("drm_agp"))
        {
            inter_module_put("drm_agp");
            nv_printf(NV_DBG_WARNINGS, "NVRM: not using NVAGP, AGPGART is loaded!\n");
            return status;
        }
#elif defined(AGPGART)
#if (NV_AGP_BACKEND_ACQUIRE_ARGUMENT_COUNT == 1)
        if (!list_empty(&agp_bridges))
        {
            nv_printf(NV_DBG_WARNINGS,
                "NVRM: not using NVAGP, an AGPGART backend is loaded!\n");
            return status;
        }
#else
        int error;
        /*
         * We can only safely use NvAGP when no backend has been
         * registered with the AGPGART frontend. This condition
         * is only met when the acquire function returns -EINVAL.
         *
         * Other return codes indicate that a backend is present
         * and was either acquired, busy or else unavailable.
         */
        if ((error = agp_backend_acquire()) != -EINVAL)
        {
            if (!error) agp_backend_release();
            nv_printf(NV_DBG_WARNINGS,
                "NVRM: not using NVAGP, an AGPGART backend is loaded!\n");
            return status;
        }
#endif
#endif /* AGPGART */
#if defined(CONFIG_X86_64) && defined(CONFIG_GART_IOMMU)
        nv_printf(NV_DBG_WARNINGS,
            "NVRM: not using NVAGP, kernel was compiled with GART_IOMMU support!\n");
#else
        status = rm_init_agp(nv);
        if (status == RM_OK)
        {
            nv->agp_config = NVOS_AGP_CONFIG_NVAGP;
            nv->agp_status = NV_AGP_STATUS_ENABLED;
        }
#endif
    }

    if (NV_AGP_ENABLED(nv))
        old_error = 0; /* report new errors */

    return status;
}
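
/*
 * Tear down whichever AGP path nv_agp_init() brought up, dispatching
 * to the AGPGART (OSAGP) or NVAGP teardown routine, and mark AGP as
 * disabled again.
 */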

int NV_API_CALL nv_agp_teardown(
    nv_state_t *nv
)
{
    U032 status = 1;

    nv_printf(NV_DBG_SETUP, "NVRM: nv_agp_teardown\n");

    /* a little sanity check won't hurt */
    if (!NV_AGP_ENABLED(nv))
        return -1;

    if (NV_OSAGP_ENABLED(nv))
        status = KernTeardownAGP(nv);
    else if (NV_NVAGP_ENABLED(nv))
        status = rm_teardown_agp(nv);

    nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
    nv->agp_status = NV_AGP_STATUS_DISABLED;

    return status;
}
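
/*
 * INT 10h (video BIOS) calls are not serviced by this kernel module;
 * the entry point below is a stub that always reports failure.
 */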

int NV_API_CALL nv_int10h_call(
    nv_state_t *nv,
    U032 *eax,
    U032 *ebx,
    U032 *ecx,
    U032 *edx,
    void *buffer
)
{
    return -1;
}

/* set a timer to go off every second */
int NV_API_CALL nv_start_rc_timer(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (nv->rc_timer_enabled)
        return -1;

    nv_printf(NV_DBG_INFO, "NVRM: initializing rc timer\n");
    init_timer(&nvl->rc_timer);
    nvl->rc_timer.function = nv_kern_rc_timer;
    nvl->rc_timer.data = (unsigned long) nv;
    nv->rc_timer_enabled = 1;
    mod_timer(&nvl->rc_timer, jiffies + HZ); /* set our timeout for 1 second */
    nv_printf(NV_DBG_INFO, "NVRM: rc timer initialized\n");

    return 0;
}
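
/*
 * Note the ordering in nv_stop_rc_timer(): rc_timer_enabled is
 * cleared before del_timer_sync() so that the timer handler (which
 * presumably re-arms itself only while the flag is set) cannot
 * re-queue the timer while we wait for it to finish.
 */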

int NV_API_CALL nv_stop_rc_timer(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (!nv->rc_timer_enabled)
        return -1;

    nv_printf(NV_DBG_INFO, "NVRM: stopping rc timer\n");
    nv->rc_timer_enabled = 0;
    del_timer_sync(&nvl->rc_timer);
    nv_printf(NV_DBG_INFO, "NVRM: rc timer stopped\n");

    return 0;
}

/* make sure the pci_driver called probe for all of our devices.
 * we've seen cases where rivafb claims the device first and our driver
 * doesn't get called.
 */
static int
nvos_count_devices(void)
{
    struct pci_dev *dev;
    int count = 0;

    dev = NV_PCI_GET_CLASS(PCI_CLASS_DISPLAY_VGA << 8, NULL);
    while (dev)
    {
        if ((dev->vendor == 0x10de) && (dev->device >= 0x20) &&
            !rm_is_legacy_device(dev->device, TRUE))
            count++;
        dev = NV_PCI_GET_CLASS(PCI_CLASS_DISPLAY_VGA << 8, dev);
    }

    dev = NV_PCI_GET_CLASS(PCI_CLASS_DISPLAY_3D << 8, NULL);
    while (dev)
    {
        if ((dev->vendor == 0x10de) && (dev->device >= 0x20) &&
            !rm_is_legacy_device(dev->device, TRUE))
            count++;
        dev = NV_PCI_GET_CLASS(PCI_CLASS_DISPLAY_3D << 8, dev);
    }

    return count;
}
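
/*
 * nv_kern_probe() accepts only non-legacy NVIDIA VGA/3D controllers
 * and then performs the usual PCI bring-up: enable the device,
 * validate its IRQ and BARs, reserve the register BAR, enable bus
 * mastering, and record per-device state in nv_linux_devices[].
 */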

/* find nvidia devices and set initial state */
int
nv_kern_probe
(
    struct pci_dev *dev,
    const struct pci_device_id *id_table
)
{
    nv_state_t *nv;
    nv_linux_state_t *nvl;
    unsigned int i, j;

    nv_printf(NV_DBG_SETUP, "NVRM: probing 0x%x 0x%x, class 0x%x\n",
        dev->vendor, dev->device, dev->class);

    if ((dev->vendor != 0x10de) || (dev->device < 0x20) ||
        ((dev->class != (PCI_CLASS_DISPLAY_VGA << 8)) &&
         (dev->class != (PCI_CLASS_DISPLAY_3D << 8))) ||
        rm_is_legacy_device(dev->device, FALSE))
    {
        return -1;
    }

    num_probed_nv_devices++;

    if (num_nv_devices == NV_MAX_DEVICES)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: maximum device number (%d) exceeded!\n",
            (NV_MAX_DEVICES - 1));
        return -1;
    }

    if (pci_enable_device(dev) != 0)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: pci_enable_device failed, aborting\n");
        return -1;
    }

    if (dev->irq == 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Can't find an IRQ for your NVIDIA card!\n");
        nv_printf(NV_DBG_ERRORS, "NVRM: Please check your BIOS settings.\n");
        nv_printf(NV_DBG_ERRORS, "NVRM: [Plug & Play OS] should be set to NO\n");
        nv_printf(NV_DBG_ERRORS, "NVRM: [Assign IRQ to VGA] should be set to YES\n");
        return -1;
    }

    // we won't always have a BAR 3, so only validate the first
    // (NV_GPU_NUM_BARS - 1) BARs here
    for (i = 0; i < (NV_GPU_NUM_BARS - 1); i++)
    {
        if (NV_PCI_RESOURCE_VALID(dev, i))
            continue;
        nv_printf(NV_DBG_ERRORS,
            "NVRM: This PCI I/O region assigned to your NVIDIA device is invalid:\n"
            "NVRM: BAR%d is %dM @ 0x%08x (PCI:%04x:%02x.%x)\n", i,
            NV_PCI_RESOURCE_SIZE(dev, i) >> 20, NV_PCI_RESOURCE_START(dev, i),
            NV_PCI_BUS_NUMBER(dev), NV_PCI_SLOT_NUMBER(dev), PCI_FUNC(dev->devfn));
        if (NV_PCI_RESOURCE_FLAGS(dev, i) & PCI_BASE_ADDRESS_MEM_TYPE_64)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: This is a 64-bit BAR, which some Linux kernels are known to\n"
                "NVRM: ignore or handle incorrectly. Please see the README section\n"
                "NVRM: on 64-bit BARs for more information.\n");
        }
        else
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: The system BIOS may have misconfigured your graphics card.\n");
        }
        return -1;
    }

    // request ownership of our BARs to keep other drivers from banging
    // our registers. only do this for the register BAR: vesafb requests
    // our framebuffer BAR and would keep us from working properly
    if (!request_mem_region(NV_PCI_RESOURCE_START(dev, NV_GPU_BAR_INDEX_REGS),
                NV_PCI_RESOURCE_SIZE(dev, NV_GPU_BAR_INDEX_REGS), "nvidia"))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: request_mem_region failed for %dM @ 0x%08x. This can\n"
            "NVRM: occur when a driver such as rivatv is loaded and claims\n"
            "NVRM: ownership of the device's registers.\n",
            NV_PCI_RESOURCE_SIZE(dev, NV_GPU_BAR_INDEX_REGS) >> 20,
            NV_PCI_RESOURCE_START(dev, NV_GPU_BAR_INDEX_REGS));
        return -1;
    }
    pci_set_master(dev);

    /* initialize bus-dependent config state */
    nvl = &nv_linux_devices[num_nv_devices];
    nv = NV_STATE_PTR(nvl);

    pci_set_drvdata(dev, (void *)nvl);

    /* default to 32-bit PCI bus address space */
    dev->dma_mask = 0xffffffffULL;

    nvl->dev = dev;
    nv->vendor_id = dev->vendor;
    nv->device_id = dev->device;
    nv->os_state = (void *) nvl;
    nv->bus = NV_PCI_BUS_NUMBER(dev);
    nv->slot = NV_PCI_SLOT_NUMBER(dev);
    nv->handle = dev;

    nv_lock_init_locks(nv);

    for (i = 0, j = 0; i < NVRM_PCICFG_NUM_BARS && j < NV_GPU_NUM_BARS; i++)
    {
        if (NV_PCI_RESOURCE_VALID(dev, i) &&
            ((NV_PCI_RESOURCE_FLAGS(dev, i) & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY))
        {
            nv->bars[j].address = NV_PCI_RESOURCE_START(dev, i);
            nv->bars[j].size = NV_PCI_RESOURCE_SIZE(dev, i);
            nv->bars[j].offset = NVRM_PCICFG_BAR_OFFSET(i);
            j++;
        }
    }
    nv->regs = &nv->bars[NV_GPU_BAR_INDEX_REGS];
    nv->fb = &nv->bars[NV_GPU_BAR_INDEX_FB];

    nv->interrupt_line = dev->irq;

#if defined(CONFIG_VGA_ARB)
#if defined(VGA_DEFAULT_DEVICE)
    vga_tryget(VGA_DEFAULT_DEVICE, VGA_RSRC_LEGACY_MASK);
#endif
    vga_set_legacy_decoding(dev, VGA_RSRC_NONE);
#endif

    if (!rm_init_private_state(nv))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_private_state() failed!\n");
        goto err_zero_dev;
    }

    nv_printf(NV_DBG_INFO, "NVRM: %02x:%02x.%x %04x:%04x - 0x%08x [size=%dM]\n",
        nv->bus, nv->slot, PCI_FUNC(dev->devfn),
        nv->vendor_id, nv->device_id, nv->regs->address,
        nv->regs->size / (1024 * 1024));
    nv_printf(NV_DBG_INFO, "NVRM: %02x:%02x.%x %04x:%04x - 0x%08x [size=%dM]\n",
        nv->bus, nv->slot, PCI_FUNC(dev->devfn),
        nv->vendor_id, nv->device_id, nv->fb->address,
        nv->fb->size / (1024 * 1024));

    num_nv_devices++;

    return 0;

err_zero_dev:
    rm_free_private_state(nv);
    os_mem_set(nvl, 0, sizeof(nv_linux_state_t));
    release_mem_region(NV_PCI_RESOURCE_START(dev, NV_GPU_BAR_INDEX_REGS),
        NV_PCI_RESOURCE_SIZE(dev, NV_GPU_BAR_INDEX_REGS));
    NV_PCI_DISABLE_DEVICE(dev);
    return -1;
}
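
/*
 * Report whether all mappings can be kept coherent: nonzero only when
 * the kernel provides change_page_attr() or set_pages_uc() and memory
 * type updates (nv_update_memory_types) are enabled.
 */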

int NV_API_CALL nv_no_incoherent_mappings(void)
{
#if defined(NV_CHANGE_PAGE_ATTR_PRESENT) || defined(NV_SET_PAGES_UC_PRESENT)
    return (nv_update_memory_types);
#else
    return 0;
#endif
}

#if defined(NV_PM_SUPPORT_DEVICE_DRIVER_MODEL)
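
/*
 * Power management: translate PCI power state transitions into the
 * resource manager's suspend/resume events. Depending on kernel
 * support, these are reported as APM or ACPI events, hence the two
 * compile-time variants below.
 */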

static int
nv_power_management(
    struct pci_dev *dev,
    u32 state
)
{
    nv_state_t *nv;
    nv_linux_state_t *lnv = NULL;
    int status = RM_OK;

    nv_printf(NV_DBG_INFO, "NVRM: nv_power_management: %d\n", state);
    lnv = pci_get_drvdata(dev);

    if ((!lnv) || (lnv->dev != dev))
    {
        nv_printf(NV_DBG_WARNINGS, "NVRM: PM: invalid device!\n");
        return -1;
    }

    nv = NV_STATE_PTR(lnv);

    nv_verify_pci_config(nv);

    switch (state)
    {
#if defined(NV_PM_SUPPORT_NEW_STYLE_APM)
        case PCI_D3hot:
            nv_printf(NV_DBG_INFO, "NVRM: APM: received suspend event\n");
            status = rm_power_management(nv, 0, NV_PM_APM_SUSPEND);
            __nv_disable_pat_support();
            break;

        case PCI_D0:
            nv_printf(NV_DBG_INFO, "NVRM: APM: received resume event\n");
            __nv_enable_pat_support();
            status = rm_power_management(nv, 0, NV_PM_APM_RESUME);
            break;

#else /* end of NV_PM_SUPPORT_NEW_STYLE_APM */
        case PCI_D3hot:
            nv_printf(NV_DBG_INFO, "NVRM: ACPI: received suspend event\n");
            status = rm_power_management(nv, 0, NV_PM_ACPI_STANDBY);
            __nv_disable_pat_support();
            break;

        case PCI_D0:
            nv_printf(NV_DBG_INFO, "NVRM: ACPI: received resume event\n");
            __nv_enable_pat_support();
            status = rm_power_management(nv, 0, NV_PM_ACPI_RESUME);
            break;

#endif /* end of NV_PM_SUPPORT_NEW_STYLE_APM */

        default:
            nv_printf(NV_DBG_WARNINGS, "NVRM: PM: unsupported event: %d\n", state);
            return -1;
    }

    if (status != RM_OK)
        nv_printf(NV_DBG_ERRORS, "NVRM: PM: failed event: %d\n", state);

    return status;
}
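
/*
 * nv_kern_suspend() normalizes the kernel's suspend argument: older
 * kernels pass a raw integer state, while newer ones pass a
 * pm_message_t that is translated to a PCI power state with
 * pci_choose_state() where available.
 */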

static int nv_kern_suspend(
    struct pci_dev *dev,
    pm_message_t state
)
{
    int power_state = -1;

#if !defined(NV_PM_MESSAGE_T_PRESENT)
    power_state = state;
#elif defined(NV_PCI_CHOOSE_STATE_PRESENT)
    power_state = pci_choose_state(dev, state);
#endif

    return nv_power_management(dev, power_state);
}

static int nv_kern_resume(
    struct pci_dev *dev
)
{
    return nv_power_management(dev, PCI_D0);
}

#endif /* defined(NV_PM_SUPPORT_DEVICE_DRIVER_MODEL) */
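
/*
 * Look up a device's nv_state_t by PCI bus and slot with a linear
 * scan of nv_linux_devices[]; returns NULL if no probed adapter
 * matches.
 */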

void* NV_API_CALL nv_get_adapter_state(
    U016 bus,
    U016 slot
)
{
    unsigned int i;

    for (i = 0; i < num_nv_devices; i++)
    {
        nv_state_t *nv = NV_STATE_PTR(&nv_linux_devices[i]);
        if (nv->bus == bus && nv->slot == slot)
            return (void *) nv;
    }

    return NULL;
}
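
/*
 * Ask the resource manager to sanity-check this device's PCI config
 * space. BAR checks are skipped once an MMCONFIG failure has been
 * detected or when the caller cannot sleep; on failure the MMCONFIG
 * warning is logged and, if sleeping is allowed, also exposed as a
 * text file in the driver's warnings proc directory.
 */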

void NV_API_CALL nv_verify_pci_config(
    nv_state_t *nv
)
{
    BOOL check_the_bars;

    check_the_bars = (!nv_mmconfig_failure_detected && NV_MAY_SLEEP());
    rm_check_pci_config_space(nv, check_the_bars, &nv_mmconfig_failure_detected);

    if (nv_mmconfig_failure_detected)
    {
        if (NV_MAY_SLEEP())
        {
            nvos_proc_add_text_file(proc_nvidia_warnings, "mmconfig",
                __mmconfig_warning);
        }
        nv_prints(NV_DBG_ERRORS, __mmconfig_warning);
    }
}