--- common/lib/modules/fglrx/build_mod/firegl_public.c
+++ common/lib/modules/fglrx/build_mod/firegl_public.c
@@ -24,13 +24,13 @@
 // ============================================================
 #include <linux/version.h>
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 #error Kernel versions older than 2.6.0 are no longer supported by this module.
-#endif 
+#endif
 
 #include <linux/autoconf.h>
 
-#if !defined(CONFIG_X86_PC) 
+#if !defined(CONFIG_X86_PC)
 #if !defined(CONFIG_X86_64)
 #if !defined(CONFIG_X86_VOYAGER)
 #if !defined(CONFIG_X86_NUMAQ)
@@ -62,10 +62,10 @@
  * distribution would even include such a kernel patch. */
 #ifdef CONFIG_MEM_MIRROR
 /* Prevent asm/mm_track.h from being included in subsequent
- * kernel headers as that would redefine CONFIG_MEM_MIRROR. */ 
+ * kernel headers as that would redefine CONFIG_MEM_MIRROR. */
 #ifndef CONFIG_X86_64
 #define __I386_MMTRACK_H__
-#define mm_track(ptep) 
+#define mm_track(ptep)
 #else
 #define __X86_64_MMTRACK_H__
 #define mm_track_pte(ptep)
@@ -93,7 +93,7 @@
 #include
 #include
 #include
-#include <linux/config.h>
+//#include <linux/config.h>
 #include
 #include
 #include
@@ -151,8 +151,8 @@
 #include "linux/freezer.h"
 #endif
 
-// For 2.6.18 or higher, the UTS_RELEASE is defined in the linux/utsrelease.h. 
-#ifndef UTS_RELEASE 
+// For 2.6.18 or higher, UTS_RELEASE is defined in linux/utsrelease.h.
+#ifndef UTS_RELEASE
 #include <linux/utsrelease.h>
 #endif
 
@@ -210,7 +210,7 @@
 char* firegl = NULL;
 int __ke_debuglevel = 0;
 int __ke_moduleflags = 0;
-static struct pci_device_id fglrx_pci_table[] = 
+static struct pci_device_id fglrx_pci_table[] =
 {
 #define FGL_ASIC_ID(x) \
 { \
@@ -278,7 +278,7 @@
 const unsigned long __ke_PAE_State = 1;
 static int firegl_pat_enabled = 0;
 static unsigned long KCL_pat[2];
-static unsigned long KCL_orig_pat[2]; 
+static unsigned long KCL_orig_pat[2];
 
 static int ATI_API_CALL KCL_enable_pat(void);
 static void ATI_API_CALL KCL_disable_pat(void);
@@ -305,7 +305,7 @@ long ip_firegl_compat_ioctl(struct file* filp, unsigned int cmd, unsigned long a
 #endif
 
 __ke_ssize_t ip_firegl_read( struct file *filp,
-                             char *buf, 
+                             char *buf,
                              __ke_size_t size,
                              __ke_loff_t *off_ptr)
 {
@@ -313,7 +313,7 @@ __ke_ssize_t ip_firegl_read( struct file *filp,
 }
 
 __ke_ssize_t ip_firegl_write( struct file *filp,
-                              const char *buf, 
+                              const char *buf,
                               __ke_size_t size,
                              __ke_loff_t *off_ptr)
 {
@@ -449,34 +449,34 @@
 READ_PROC_WRAP(firegl_debug_info)
 READ_PROC_WRAP(firegl_bios_version)
 READ_PROC_WRAP(firegl_interrupt_info)
-static int 
+static int
 firegl_interrupt_open_wrap(
-    struct inode *inode, 
-    struct file *file) 
+    struct inode *inode,
+    struct file *file)
 {
     return firegl_interrupt_open(inode, file);
 }
 
-static int 
+static int
 firegl_interrupt_release_wrap(
-    struct inode *inode, 
-    struct file *file) 
+    struct inode *inode,
+    struct file *file)
 {
     return firegl_interrupt_release(inode, file);
 }
 
-static ssize_t 
+static ssize_t
 firegl_interrupt_read_wrap(
-    struct file *user_file, 
-    char __user *user_buf, 
-    size_t user_buf_size, 
+    struct file *user_file,
+    char __user *user_buf,
+    size_t user_buf_size,
     loff_t *user_file_pos)
 {
     return (ssize_t) firegl_interrupt_read(user_file, user_buf, user_buf_size, user_file_pos);
 }
 
-static unsigned int 
-firegl_interrupt_poll_wrap(struct file *user_file, poll_table *pt) 
+static unsigned int
+firegl_interrupt_poll_wrap(struct file *user_file, poll_table *pt)
 {
     if(firegl_interrupt_poll(user_file, (__ke_poll_table*)pt))
     {
@@ -488,11 +488,11 @@ firegl_interrupt_poll_wrap(struct file *user_file, poll_table *pt)
     }
 }
 
-static ssize_t 
+static ssize_t
 firegl_interrupt_write_wrap(
-    struct file *user_file, 
-    const char __user *user_buf, 
-    size_t user_buf_size, 
+    struct file *user_file,
+    const char __user *user_buf,
+    size_t user_buf_size,
     loff_t *user_file_pos)
 {
     return (ssize_t) firegl_interrupt_write(user_file, user_buf, user_buf_size, user_file_pos);
@@ -502,7 +502,7 @@ firegl_interrupt_write_wrap(
  * \param func function to be wrapped
  * \return None
  */
-static void 
+static void
 firegl_smp_func_parameter_wrap(
     void *func)
 {
@@ -517,7 +517,7 @@ static struct file_operations firegl_interrupt_file_ops = {
     .write = firegl_interrupt_write_wrap
 };
 
-__ke_proc_list_t firegl_proc_list[] = 
+__ke_proc_list_t firegl_proc_list[] =
 {
     { "name", drm_name_info_wrap, NULL},
     { "mem", drm_mem_info_wrap, NULL},
@@ -586,11 +586,11 @@ static struct proc_dir_entry *firegl_proc_init( device_t *dev,
         {
             ent->proc_fops = (struct file_operations*)list->fops;
         }
-        
+
         {
             ent->data = (dev->pubdev.signature == FGL_DEVICE_SIGNATURE)?
                         firegl_find_device(minor) : (dev);
         }
-        
+
         list++;
     }
@@ -623,7 +623,7 @@ static int firegl_proc_cleanup( int minor,
     {
         remove_proc_entry("dri", NULL);
         __KE_DEBUG("remove proc dri. \n");
-    } 
+    }
 
     return 0;
 }
@@ -661,12 +661,12 @@ static int firegl_stub_getminor(const char *name, struct file_operations *fops,
 {
     int i;
     int count = 0;
-    
+
     __KE_DEBUG("firegl_stub_getminor: name=\"%s\"\n", name);
 
-    for( i = 0; i < FIREGL_STUB_MAXCARDS; i++ ) 
+    for( i = 0; i < FIREGL_STUB_MAXCARDS; i++ )
     {
-        if( !firegl_stub_list[i].fops ) 
+        if( !firegl_stub_list[i].fops )
         {
             firegl_stub_list[i].name = name;
             firegl_stub_list[i].fops = fops;
@@ -693,16 +693,16 @@ static int firegl_stub_putminor(int minor)
     if (minor < 0 || minor >= FIREGL_STUB_MAXCARDS)
     {
         return -1;
-    } 
+    }
     firegl_proc_cleanup(minor, firegl_stub_root, firegl_stub_list[minor].dev_root, firegl_stub_list[minor].proclist);
     firegl_stub_list[minor].name = NULL;
     firegl_stub_list[minor].fops = NULL;
     firegl_stub_list[minor].proclist = NULL;
 
-    if( minor == (firegl_minors-1) ) 
+    if( minor == (firegl_minors-1) )
     {
         unregister_chrdev(DRM_MAJOR, "drm");
-    } 
+    }
 
     return 0;
 }
@@ -726,7 +726,7 @@ static int __init firegl_stub_register(const char *name, struct file_operations
         return -1;
     }
     else if(err == -EBUSY)
     {
-        // the registering of the module's device has failed 
+        // registering the module's device failed
         // because there was already some other drm module loaded.
         __KE_DEBUG("register_chrdev() failed with -EBUSY\n");
         return -1;
@@ -758,7 +758,7 @@ static int fglrx_pci_probe(struct pci_dev *dev, const struct pci_device_id *id_t
 /* Starting from 2.6.14, kernel has new struct defined for pm_message_t,
    we have to handle this case separately.
    2.6.11/12/13 kernels have pm_message_t defined as int and older kernels
-   don't have pm_message_t defined. 
+   don't have pm_message_t defined.
 */
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
 static int fglrx_pci_suspend(struct pci_dev *pdev, pm_message_t pm_event)
@@ -794,9 +794,9 @@ static int fglrx_pci_suspend(struct pci_dev *pdev, u32 pm_event)
 
     if (!ret)
     {
-        
-        // since privdev->pcidev is acquired in X server, use pdev 
-        // directly here to allow suspend/resume without X server start. 
+
+        // since privdev->pcidev is acquired in the X server, use pdev
+        // directly here to allow suspend/resume without starting the X server.
         firegl_pci_save_state((__ke_pci_dev_t*)pdev, privdev);
         pci_disable_device(pdev);
         PMSG_EVENT(pdev->dev.power.power_state) = state;
@@ -848,13 +848,13 @@ static int fglrx_pci_resume(struct pci_dev *pdev)
     // before pci_set_master!
     firegl_pci_restore_state((__ke_pci_dev_t*)pdev, privdev);
 
-    if (pci_enable_device(pdev)) 
+    if (pci_enable_device(pdev))
     {
         __KE_ERROR("Cannot enable PCI device.\n");
-    } 
+    }
 
     pci_set_master(pdev);
-    
+
     firegl_cail_powerup(privdev);
 
     if (PMSG_EVENT(pdev->dev.power.power_state) == PM_EVENT_SUSPEND)
@@ -865,7 +865,7 @@ static int fglrx_pci_resume(struct pci_dev *pdev)
     return 0;
 }
 
-static struct pci_driver fglrx_pci_driver = 
+static struct pci_driver fglrx_pci_driver =
 {
     .name     = "fglrx_pci",
     .id_table = fglrx_pci_table,
@@ -921,10 +921,10 @@ static int firegl_init_devices(__ke_device_t *pubdev)
     {
         pid = (struct pci_device_id *) &fglrx_pci_table[i];
         pdev = NULL;
-        while (( pdev = pci_get_subsys(pid->vendor, 
-                                       pid->device, 
-                                       PCI_ANY_ID, 
-                                       PCI_ANY_ID, 
+        while (( pdev = pci_get_subsys(pid->vendor,
+                                       pid->device,
+                                       PCI_ANY_ID,
+                                       PCI_ANY_ID,
                                        pdev)) != NULL)
         {
             num_of_devices++;
@@ -934,7 +934,7 @@ static int firegl_init_devices(__ke_device_t *pubdev)
 
     if (firegl_init_device_heads(num_of_devices))
     {
-        return -ENOMEM; 
+        return -ENOMEM;
     }
 
     for (i=0; fglrx_pci_table[i].vendor != 0; i++)
@@ -942,15 +942,15 @@ static int firegl_init_devices(__ke_device_t *pubdev)
         pid = (struct pci_device_id *) &fglrx_pci_table[i];
         pdev = NULL;
 
-        while (( pdev = pci_get_subsys(pid->vendor, 
-                                       pid->device, 
-                                       PCI_ANY_ID, 
-                                       PCI_ANY_ID, 
+        while (( pdev = pci_get_subsys(pid->vendor,
+                                       pid->device,
+                                       PCI_ANY_ID,
+                                       PCI_ANY_ID,
                                        pdev)) != NULL)
         {
             if ((ret_code = firegl_get_dev(pubdev, pdev)))
            {
-                return ret_code; 
+                return ret_code;
             }
 
             j++;
@@ -983,7 +983,7 @@ static int __init firegl_init_module(void)
     // init global vars that are in fact constants
     __ke_HZ = HZ;
 
-#ifdef _KE_SERIAL_DEBUG 
+#ifdef _KE_SERIAL_DEBUG
     __ke_SetSerialPort();
 #endif
 
@@ -995,11 +995,11 @@ static int __init firegl_init_module(void)
         return retcode;
     }
 
-#ifdef FIREGL_CF_SUPPORT 
+#ifdef FIREGL_CF_SUPPORT
     adapter_chain_init();
     cf_object_init();
-#endif 
-    
+#endif
+
     // init DRM proc list
     drm_proclist = kmalloc((DRM_PROC_ENTRIES + 1) * sizeof(__ke_proc_list_t), GFP_KERNEL);
     if ( drm_proclist == NULL )
@@ -1087,7 +1087,7 @@ static int __init firegl_init_module(void)
              dev->pubdev.date,
             firegl_minors);
 
-    
+
 #ifdef FIREGL_POWER_MANAGEMENT
     if (pci_register_driver (&fglrx_pci_driver) < 0)
     {
@@ -1117,12 +1117,12 @@ static void __exit firegl_cleanup_module(void)
     {
         KCL_disable_pat();
         __KE_INFO("Disable PAT\n");
-    } 
+    }
 #endif // FIREGL_USWC_SUPPORT
 
     for (i = 0; i < count; i++)
     {
-        if ( firegl_stub_unregister(i) ) 
+        if ( firegl_stub_unregister(i) )
         {
             __KE_ERROR("Cannot unload module on minor: %d\n", i);
         }
@@ -1144,10 +1144,10 @@ static void __exit firegl_cleanup_module(void)
              dev->pubdev.patchlevel,
             dev->pubdev.date);
 
-#ifdef FIREGL_CF_SUPPORT 
+#ifdef FIREGL_CF_SUPPORT
     cf_object_cleanup();
-    adapter_chain_cleanup(); 
-#endif // FIREGL_CF_SUPPORT 
+    adapter_chain_cleanup();
+#endif // FIREGL_CF_SUPPORT
 
     firegl_private_cleanup (&dev->pubdev);
 
@@ -1225,18 +1225,18 @@ void ATI_API_CALL __ke_add_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_w
 void ATI_API_CALL __ke_remove_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_wait_queue_t* entry)
 {
 //  current->state = TASK_RUNNING;
-    remove_wait_queue((wait_queue_head_t*)(void *)queue_head, 
+    remove_wait_queue((wait_queue_head_t*)(void *)queue_head,
                       (wait_queue_t*)(void *)entry);
 }
 
 void ATI_API_CALL __ke_init_waitqueue_head(__ke_wait_queue_head_t* queue_head)
 {
-    init_waitqueue_head((wait_queue_head_t*)(void *)queue_head); 
+    init_waitqueue_head((wait_queue_head_t*)(void *)queue_head);
 }
 
 void ATI_API_CALL __ke_wait_event_interruptible(__ke_wait_queue_head_t* queue_head, int condition)
 {
-    wait_event_interruptible(*((wait_queue_head_t*)(void *)queue_head), condition); 
+    wait_event_interruptible(*((wait_queue_head_t*)(void *)queue_head), condition);
 }
 
 void ATI_API_CALL __ke_poll_wait(struct file* filp, __ke_wait_queue_head_t* queue_head, __ke_poll_table* pt)
@@ -1247,13 +1247,13 @@ void ATI_API_CALL __ke_poll_wait(struct file* filp, __ke_wait_queue_head_t* queu
 void ATI_API_CALL *__ke_asyncio_alloc_sema()
 {
     int i;
-    
+
     for(i=0; i= KERNEL_VERSION(2,6,17)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
     rcu_read_lock();
-#else 
+#else
     read_lock(&tasklist_lock);
-#endif
-    p = find_task_by_pid( pid );
-    if (p) 
+#endif
+    p = find_task_by_vpid( pid );
+    if (p)
     {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
-        if (p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD) 
+        if (p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD)
 #else
-        if (!(p->flags & PF_EXITING)) 
+        if (!(p->flags & PF_EXITING))
 #endif
         {
             process_terminated = 0;
         }
-    } 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) 
+    }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
     rcu_read_unlock();
-#else 
+#else
     read_unlock(&tasklist_lock);
-#endif 
+#endif
 
     return process_terminated;
 }
 
-/** /brief Call global OS kernel task/thread scheduler 
+/** \brief Call global OS kernel task/thread scheduler
  * /return Nonzero if a system call was awakened by a signal
  */
 int ATI_API_CALL KCL_GetSignalStatus(void)
@@ -1432,15 +1432,15 @@ void ATI_API_CALL __ke_unblock_all_signals(void)
     unblock_all_signals();
 }
 
-#if defined(__i386__) 
+#if defined(__i386__)
 #ifndef __HAVE_ARCH_CMPXCHG
-static inline 
-unsigned long __fgl_cmpxchg(volatile void *ptr, unsigned long old, 
-                            unsigned long new, int size) 
-{ 
-    unsigned long prev; 
-    switch (size) { 
-    case 1: 
+static inline
+unsigned long __fgl_cmpxchg(volatile void *ptr, unsigned long old,
+                            unsigned long new, int size)
+{
+    unsigned long prev;
+    switch (size) {
+    case 1:
         __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                              : "=a"(prev)
                              : "q"(new), "m"(*__xg(ptr)), "0"(old)
@@ -1551,7 +1551,7 @@ struct inode* ATI_API_CALL __ke_get_file_inode(struct file* filp)
  */
 KCL_TYPE_Pid ATI_API_CALL KCL_GetPid(void)
 {
-    return current->pid; 
+    return current->pid;
 }
 
 /** /brief Return the current Thread Group ID
@@ -1559,7 +1559,7 @@ KCL_TYPE_Pid ATI_API_CALL KCL_GetPid(void)
  */
 KCL_TYPE_Pid ATI_API_CALL KCL_GetTgid(void)
 {
-    return current->tgid; 
+    return current->tgid;
 }
 
 /** /brief Return the effective user ID
@@ -1655,7 +1655,7 @@ static int KCL_MAP_KernelConfigParam[] =
     1
#else
     0
-#endif 
+#endif
 };
 
 /** /brief Check whether a kernel configuration parameter is defined
@@ -1718,13 +1718,13 @@ unsigned long ATI_API_CALL __ke_get_resource_start(__ke_pci_dev_t *dev, unsigned
 #if defined(__x86_64__) || defined(__ia64__)
 void* ATI_API_CALL __ke_pci_alloc_consistent(__ke_pci_dev_t* dev, int size, void *dma_handle)
 {
-    return (pci_alloc_consistent( (struct pci_dev*)(void *)dev, size, dma_handle)); 
+    return (pci_alloc_consistent( (struct pci_dev*)(void *)dev, size, dma_handle));
 }
 
 void ATI_API_CALL __ke_pci_free_consistent(__ke_pci_dev_t* dev, int size, unsigned long cpu_addr,
                                            unsigned int dma_handle)
 {
-    pci_free_consistent( (struct pci_dev*)(void *)dev, size, (void *)cpu_addr, 
+    pci_free_consistent( (struct pci_dev*)(void *)dev, size, (void *)cpu_addr,
                          (unsigned long)dma_handle);
 }
 #endif // __ia64__
@@ -1748,7 +1748,7 @@ static int KCL_MAP_ErrorCode[] =
 /** \brief This function maps OS independent error conditions to OS defined error codes
  * \param errcode OS independent error condition code
- * \return OS kernel defined error code corresponding to the requested error condition 
+ * \return OS kernel defined error code corresponding to the requested error condition
  */
 int ATI_API_CALL KCL_GetErrorCode(KCL_ENUM_ErrorCode errcode)
 {
@@ -1766,7 +1766,7 @@ int ATI_API_CALL KCL_GetErrorCode(KCL_ENUM_ErrorCode errcode)
 int ATI_API_CALL firegl_get_user_ptr(u32 *src, void **dst)
 {
     unsigned long temp;
-    int err = get_user(temp, src); 
+    int err = get_user(temp, src);
     *dst = (void*) temp;
     return err;
 }
@@ -1906,7 +1906,7 @@ void ATI_API_CALL __ke_atomic_set(void* v, int val)
 
 int ATI_API_CALL __ke_atomic_dec_and_test(void* v)
 {
-    return atomic_dec_and_test((atomic_t*)v); 
+    return atomic_dec_and_test((atomic_t*)v);
 }
 
 /*****************************************************************************/
@@ -2006,19 +2006,19 @@ void ATI_API_CALL __ke_print_debug(const char* fmt, ...)
 #endif
 
 #ifdef _KE_SERIAL_DEBUG
-// To enable serial port debug message dumping,just define _KE_SERIAL_DEBUG in firegl_public.h file. 
-// Connect two PC with a null modern serial cable. run Hyper ternimal on the remote machine. 
-// It's useful to debug resume if network not works properly and serial port is not recovered 
+// To enable serial port debug message dumping, just define _KE_SERIAL_DEBUG in firegl_public.h.
+// Connect two PCs with a null modem serial cable and run HyperTerminal on the remote machine.
+// It's useful to debug resume when the network doesn't work properly and the serial port is not recovered
 // properly when fglrx resume hook is called...
-    
-    
+
+
 #define SER_DATA_PORT        0x3f8
 #define SER_INT_CTRL_PORT    SER_DATA_PORT + 1
 #define SER_INT_STAT_PORT    SER_DATA_PORT + 2
 #define SER_LINE_CTRL_PORT   SER_DATA_PORT + 3
 #define SER_MODEM_CTRL_PORT  SER_DATA_PORT + 4
 #define SER_LINE_STAT_PORT   SER_DATA_PORT + 5
-    
+
 void ATI_API_CALL __ke_printc(char c)
 {
     while((inb(SER_LINE_STAT_PORT) & 0x20) == 0 ); //wait until Transmitter Holding Register Empty
@@ -2028,7 +2028,7 @@ void ATI_API_CALL __ke_printc(char c)
 void ATI_API_CALL __ke_printstr(const char *str)
 {
     int len = strlen(str);
-    while(len--)__ke_printc(*str++); 
+    while(len--)__ke_printc(*str++);
 }
 
 int ATI_API_CALL __ke_SerPrint(const char *format, ...)
@@ -2041,26 +2041,26 @@ int ATI_API_CALL __ke_SerPrint(const char *format, ...)
     vsprintf(buffer, format, ap);
     va_end(ap);
-    
+
     __ke_printstr(buffer);
-    
+
     return 0;
 }
 
 void ATI_API_CALL __ke_SetSerialPort()
 {
     DRM_INFO("setup serial port\n");
-    outb(0x00, SER_INT_CTRL_PORT);   // Turn off interrupts 
-    
-    outb(0x80, SER_LINE_CTRL_PORT);  // SET DLAB ON 
-    outb(0x01, SER_DATA_PORT);       // Set Baud rate - Divisor Latch Low Byte 
-                                     // 0x01 = 115,200 ,0x02 = 57,600, 0x06 = 19,200 BPS, 0x0C = 9,600 BPS 
-    outb(0x00, SER_DATA_PORT + 1);   // Set Baud rate - Divisor Latch High Byte 
-    outb(0x03, SER_LINE_CTRL_PORT);  // reset DLAB ,8 Bits, No Parity, 1 Stop Bit 
-    outb(0xC7, SER_DATA_PORT + 2);   // FIFO Control Register 
+    outb(0x00, SER_INT_CTRL_PORT);   // Turn off interrupts
+
+    outb(0x80, SER_LINE_CTRL_PORT);  // Set DLAB on
+    outb(0x01, SER_DATA_PORT);       // Set baud rate - Divisor Latch Low Byte
+                                     // 0x01 = 115,200, 0x02 = 57,600, 0x06 = 19,200, 0x0C = 9,600 BPS
+    outb(0x00, SER_DATA_PORT + 1);   // Set baud rate - Divisor Latch High Byte
+    outb(0x03, SER_LINE_CTRL_PORT);  // Reset DLAB; 8 bits, no parity, 1 stop bit
+    outb(0xC7, SER_DATA_PORT + 2);   // FIFO Control Register
     outb(0x0b, SER_DATA_PORT + 4);   // Turn on DTR, RTS, and OUT2
-    
+
     __ke_printstr("serial port 0x3f8 is set ready for message print out \n");
-} 
+}
 #endif
 
 /** \brief Get number of available RAM pages
@@ -2241,7 +2241,7 @@ void *ATI_API_CALL __ke_vmalloc_to_addr(void *vmalloc_addr)
     struct page *page = NULL;
 
     page = vmalloc_to_page(vmalloc_addr);
-    if(page == NULL) 
+    if(page == NULL)
     {
         __KE_ERROR("__ke_vmalloc_to_addr: invalid page!");
         return NULL;
@@ -2283,7 +2283,7 @@ int ATI_API_CALL __ke_do_munmap(unsigned long addr, unsigned long len)
 
     retcode = do_munmap(current->mm, addr, len);
 
-#endif 
+#endif
     up_write(&current->mm->mmap_sem);
     return retcode;
 }
@@ -2327,10 +2327,10 @@ long long ATI_API_CALL __divdi3(long long n, long long base)
         minus = !minus;
     }
     else
-    { 
+    {
         ubase = base;
     }
-    
+
     do_div(un, ubase);
     return (minus? -un : un);
 }
@@ -2360,7 +2360,7 @@ long long ATI_API_CALL __moddi3(long long n, long long base)
     else
     {
         ubase = base;
-    } 
+    }
 
     rem = do_div(un, ubase);
     return (minus? -rem : rem);
 }
 
@@ -2391,7 +2391,7 @@ void* ATI_API_CALL __ke_vmap(unsigned long *pagelist, unsigned int count)
     vaddr = (void *) vmap(pages, count);
 #else
 #ifdef VM_MAP
-    vaddr = (void *) vmap(pages, count, VM_MAP, PAGE_KERNEL); 
+    vaddr = (void *) vmap(pages, count, VM_MAP, PAGE_KERNEL);
 #else
     vaddr = (void *) vmap(pages, count, 0, PAGE_KERNEL);
 #endif
@@ -2447,7 +2447,7 @@ void ATI_API_CALL __ke_vunmap(void* addr)
 }
 #endif // defined(VM_MAP) || defined(vunmap)
 
-/** \brief Reserve a memory page 
+/** \brief Reserve a memory page
  *
  * \param pt Kernel logical address of the page
  *
@@ -2459,7 +2459,7 @@ void ATI_API_CALL KCL_ReserveMemPage(void* pt)
     SetPageReserved(virt_to_page((unsigned long)pt));
 }
 
-/** \brief Unreserve a memory page 
+/** \brief Unreserve a memory page
  *
  * \param pt Kernel logical address of the page
 *
@@ -2471,7 +2471,7 @@ void ATI_API_CALL KCL_UnreserveMemPage(void* pt)
     ClearPageReserved(virt_to_page((unsigned long)pt));
 }
 
-/** \brief Lock a memory page 
+/** \brief Lock a memory page
 *
 * \param pt Kernel logical address of the page
 *
@@ -2480,14 +2480,14 @@ void ATI_API_CALL KCL_UnreserveMemPage(void* pt)
  */
 void ATI_API_CALL KCL_LockMemPage(void* pt)
 {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
     SetPageReserved(virt_to_page((unsigned long)pt));
 #else
     lock_page(virt_to_page((unsigned long)pt));
 #endif
 }
 
-/** \brief Unlock a memory page 
+/** \brief Unlock a memory page
 *
 * \param pt Kernel logical address of the page
 *
@@ -2496,7 +2496,7 @@ void ATI_API_CALL KCL_LockMemPage(void* pt)
  */
 void ATI_API_CALL KCL_UnlockMemPage(void* pt)
 {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
     ClearPageReserved(virt_to_page((unsigned long)pt));
 #else
     unlock_page(virt_to_page((unsigned long)pt));
@@ -2521,7 +2521,7 @@ void* ATI_API_CALL __ke_get_vmptr( struct _agp_memory* memory )
     return memory->vmptr;
 }
 #endif
-    
+
 void* ATI_API_CALL __ke_ioremap(unsigned long offset, unsigned long size)
 {
     return ioremap(offset, size);
@@ -2592,7 +2592,7 @@ void ATI_API_CALL KCL_flush_tlb_onepage(struct vm_area_struct * vma, unsigned lo
 {
     /*Some kernel developer removed the export of symbol "flush_tlb_page" on 2.6.25 x86_64 SMP kernel.
      Define a simple version here.*/
-#if defined(__x86_64__) && defined(__SMP__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)) 
+#if defined(__x86_64__) && defined(__SMP__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25))
     on_each_cpu(KCL_flush_tlb_one, &va, 1, 1);
 #else
     flush_tlb_page(vma, va);
 #endif
@@ -2649,9 +2649,9 @@ void ATI_API_CALL __ke_put_vm_page_table(unsigned long page_addr)
 static inline int ptep_clear_flush_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
     int ret = 0;
-    
+
     DBG_ENTER("0x%08X, 0x%08X, 0x%08X->0x%08X", vma, addr, ptep, *ptep);
-    
+
     if (pte_dirty(*ptep))
     {
 #ifdef __x86_64__
@@ -2666,19 +2666,19 @@ static inline int ptep_clear_flush_dirty(struct vm_area_struct *vma, unsigned lo
         {
             pte_update(vma->vm_mm, addr, ptep);
         }
-#endif 
+#endif
     }
 
     DBG_TRACE("0x%08X->0x%08X", ptep, *ptep);
-    
+
     // Flush Translation Lookaside Buffers
     if (ret)
     {
         KCL_flush_tlb_onepage(vma,addr);
     }
-    
+
     DBG_LEAVE("%d", ret);
-    
+
     return ret;
 }
 #endif
@@ -2705,7 +2705,7 @@ static inline int ptep_clear_flush_dirty(struct vm_area_struct *vma, unsigned lo
  * the virtual address
 *
 * \param mm Pointer to the memory descriptor structure
- * \param virtual_addr Virtual address 
+ * \param virtual_addr Virtual address
 *
 * \return Old value of the "dirty" flag on success or negative on error
 *
@@ -2961,7 +2961,7 @@ int ATI_API_CALL __ke_config_mtrr(void)
     return 0;
 #else
     return 1;
-#endif 
+#endif
 #else /* !CONFIG_MTRR */
     return 0;
 #endif /* !CONFIG_MTRR */
@@ -2987,7 +2987,7 @@ int ATI_API_CALL __ke_mtrr_del(int reg, unsigned long base, unsigned long size)
 
 int ATI_API_CALL __ke_has_vmap(void)
 {
-// We disable vmap for 2.4.x kernel to work around the big memory( > 4GB ) issue. 
+// We disable vmap for 2.4.x kernels to work around the big memory (> 4GB) issue.
 #if defined(VM_MAP) || defined(vunmap)
     return 1;
 #else
@@ -3009,7 +3009,7 @@ int ATI_API_CALL __ke_no_iommu(void)
 {
     return 0;
 }
-#endif 
+#endif
 
 /*****************************************************************************/
 
@@ -3066,7 +3066,7 @@ unsigned int ATI_API_CALL __ke_pci_get_func(__ke_pci_dev_t* pcidev)
 {
     struct pci_dev* dev = (struct pci_dev*)pcidev;
     return PCI_FUNC(dev->devfn);
-} 
+}
 
 __ke_dma_addr_t ATI_API_CALL __ke_pci_map_single (__ke_pci_dev_t *pdev, void *buffer, __ke_size_t size, int direction)
 {
@@ -3153,7 +3153,7 @@ static irqreturn_t ke_irq_handler_wrap(int irq, void *arg1)
     return IRQ_HANDLED;
 }
 #endif
-    
+
 int ATI_API_CALL __ke_request_irq(unsigned int irq,
     void (*ATI_API_CALL handler)(int, void *, void *),
     const char *dev_name, void *dev_id)
@@ -3166,7 +3166,7 @@ int ATI_API_CALL __ke_request_irq(unsigned int irq,
                        SA_SHIRQ,
 #else
                        IRQF_SHARED,
-#endif 
+#endif
                        dev_name, dev_id);
 }
 
@@ -3203,12 +3203,12 @@ int ATI_API_CALL __ke_agp_memory_get_page_count(struct _agp_memory* agpmem)
     return (int)(agpmem->page_count);
 }
 
-void ATI_API_CALL __ke_agp_memory_get_memory(struct _agp_memory* agpmem, 
+void ATI_API_CALL __ke_agp_memory_get_memory(struct _agp_memory* agpmem,
                                              unsigned long **memory_ptr)
 {
     __KE_DEBUG("[%s] agpmem=0x%016lx agpmem->memory=0x%016lx [0]=0x%016x",
-               __FUNCTION__, 
-               (unsigned long)agpmem, 
+               __FUNCTION__,
+               (unsigned long)agpmem,
                (unsigned long)agpmem->memory,
                (agpmem->memory)[0]);
 
@@ -3217,18 +3217,9 @@ void ATI_API_CALL __ke_agp_memory_get_memory(struct _agp_memory* agpmem,
 
 /*****************************************************************************/
 
-#ifndef NOPAGE_SIGBUS
-#define NOPAGE_SIGBUS 0
-#endif /* !NOPAGE_SIGBUS */
-
 typedef struct page mem_map_t;
 typedef mem_map_t *vm_nopage_ret_t;
 
-static __inline__ vm_nopage_ret_t do_vm_nopage(struct vm_area_struct* vma,
-                                               unsigned long address)
-{
-    return 0; /* Disallow mremap */
-}
 
 #ifdef __AGP__BUILTIN__
 #ifdef __ia64__
@@ -3253,19 +3244,20 @@ static __inline__ vm_nopage_ret_t do_vm_cant_nopage(struct vm_area_struct* vma,
             return page;
         }
     }
-    return NOPAGE_SIGBUS; /* Disallow mremap */ 
+    return VM_FAULT_SIGBUS; /* Disallow mremap */
 }
 #endif /* __ia64__ */
 #endif /* __AGP__BUILTIN__ */
 
-static __inline__ vm_nopage_ret_t do_vm_shm_nopage(struct vm_area_struct* vma, 
-                                                   unsigned long address) 
+static __inline__ int do_vm_shm_nopage(struct vm_area_struct* vma,
+                                       struct vm_fault *vmf)
 {
     pgd_t* pgd_p;
     pmd_t* pmd_p;
     pte_t  pte;
+    unsigned long address = (unsigned long)vmf->virtual_address;
     unsigned long vma_offset;
     unsigned long pte_linear;
     mem_map_t* pMmPage;
@@ -3294,7 +3286,7 @@ static __inline__ vm_nopage_ret_t do_vm_shm_nopage(struct vm_area_struct* vma,
               (unsigned long)__ke_vm_offset(vma));
 
     if (address > vma->vm_end)
-        return NOPAGE_SIGBUS; /* address is out of range */ 
+        return VM_FAULT_SIGBUS; /* address is out of range */
 
     /* Calculate offset into VMA */
     vma_offset = address - vma->vm_start;
@@ -3306,7 +3298,7 @@ static __inline__ vm_nopage_ret_t do_vm_shm_nopage(struct vm_area_struct* vma,
     pte_linear = firegl_get_addr_from_vm(vma);
     if (!pte_linear)
     {
-        return NOPAGE_SIGBUS; /* bad address */ 
+        return VM_FAULT_SIGBUS; /* bad address */
     }
     pte_linear += vma_offset;
 
@@ -3334,7 +3326,9 @@ static __inline__ vm_nopage_ret_t do_vm_shm_nopage(struct vm_area_struct* vma,
 //  __KE_DEBUG3("vm-address 0x%08lx => kernel-page-address 0x%p\n",
 //              address, page_address(pMmPage));
 
-    return pMmPage; 
+    vmf->page = pMmPage;
+
+    return 0;
 }
 
 /*
 
   This routine is intended to remap addresses of a mapping
   (which is one ore more pages in size)
 
 */
-static __inline__ vm_nopage_ret_t do_vm_dma_nopage(struct vm_area_struct* vma, unsigned long address) 
+static __inline__ int do_vm_dma_nopage(struct vm_area_struct* vma,
+                                       struct vm_fault *vmf)
 {
+    unsigned long address = (unsigned long) vmf->virtual_address;
     unsigned long kaddr;
     mem_map_t* pMmPage;
 
@@ -3360,7 +3356,7 @@ static __inline__ vm_nopage_ret_t do_vm_dma_nopage(struct vm_area_struct* vma, u
     kaddr = firegl_get_addr_from_vm(vma);
     if (!kaddr)
     {
-        return NOPAGE_SIGBUS; /* bad address */ 
+        return VM_FAULT_SIGBUS; /* bad address */
     }
     kaddr += (address - vma->vm_start);
 
@@ -3372,19 +3368,23 @@ static __inline__ vm_nopage_ret_t do_vm_dma_nopage(struct vm_area_struct* vma, u
     // with drm_alloc_pages, which marks all pages as reserved. Reserved
     // pages' usage count is not decremented by the kernel during unmap!!!
     //
-    // For kernel >= 2.6.15, We should reenable this, because the VM sub-system 
-    // will decrement the pages' usage count even for the pages marked as reserved 
+    // For kernel >= 2.6.15, we should re-enable this, because the VM sub-system
+    // will decrement the pages' usage count even for the pages marked as reserved
     // - MC.
     get_page(pMmPage);   /* inc usage count of page */
 #endif
 
     __KE_DEBUG3("vm-address 0x%08lx => kernel-page-address 0x%p\n",
                 address, page_address(pMmPage));
 
-    return pMmPage; 
+    vmf->page = pMmPage;
+
+    return 0;
 }
 
-static __inline__ vm_nopage_ret_t do_vm_kmap_nopage(struct vm_area_struct* vma, unsigned long address) 
+static __inline__ int do_vm_kmap_nopage(struct vm_area_struct* vma,
+                                        struct vm_fault *vmf)
 {
+    unsigned long address = (unsigned long) vmf->virtual_address;
     unsigned long kaddr;
     mem_map_t* pMmPage;
 
@@ -3394,13 +3394,14 @@ static __inline__ vm_nopage_ret_t do_vm_kmap_nopage(struct vm_area_struct* vma,
     if ((pMmPage = (mem_map_t*) firegl_get_pagetable_page_from_vm(vma)))
     {
         get_page(pMmPage);
-        return pMmPage; 
+        vmf->page = pMmPage;
+        return 0;
     }
 
     kaddr = firegl_get_addr_from_vm(vma);
     if (!kaddr)
     {
-        return NOPAGE_SIGBUS; /* bad address */ 
+        return VM_FAULT_SIGBUS; /* bad address */
     }
     kaddr += (address - vma->vm_start);
 
@@ -3413,50 +3414,52 @@ static __inline__ vm_nopage_ret_t do_vm_kmap_nopage(struct vm_area_struct* vma,
     __KE_DEBUG3("vm-address 0x%08lx => kernel-page-address 0x%p\n",
                 address, page_address(pMmPage));
 
-    return pMmPage; 
+    vmf->page = pMmPage;
+    return 0;
 }
 
-/** 
+/**
  **
- ** This routine is intented to locate the page table through the 
+ ** This routine is intended to locate the page table through the
  ** pagelist table created earlier in dev-> pcie
 **/
-static __inline__ vm_nopage_ret_t do_vm_pcie_nopage(struct vm_area_struct* vma, 
-                                                    unsigned long address) 
+static __inline__ int do_vm_pcie_nopage(struct vm_area_struct* vma,
+                                        struct vm_fault *vmf)
 {
+    unsigned long address = (unsigned long)vmf->virtual_address;
     unsigned long vma_offset;
-    unsigned long i; 
+    unsigned long i;
     mem_map_t* pMmPage;
     struct firegl_pcie_mem* pciemem;
     unsigned long* pagelist;
-    
+
     drm_device_t *dev = (drm_device_t *)firegl_get_dev_from_vm(vma);
     if (dev == NULL)
     {
         __KE_ERROR("dev is NULL\n");
-        return NOPAGE_SIGBUS; 
+        return VM_FAULT_SIGBUS;
     }
 
     if (address > vma->vm_end)
     {
         __KE_ERROR("address out of range\n");
-        return NOPAGE_SIGBUS; /* address is out of range */ 
+        return VM_FAULT_SIGBUS; /* address is out of range */
     }
 
     pciemem = firegl_get_pciemem_from_addr ( vma, address);
     if (pciemem == NULL)
     {
         __KE_ERROR("No pciemem found! \n");
-        return NOPAGE_SIGBUS; 
-    } 
+        return VM_FAULT_SIGBUS;
+    }
 
     pagelist = firegl_get_pagelist_from_vm(vma);
-    if (pagelist == NULL) 
+    if (pagelist == NULL)
     {
\n"); - return NOPAGE_SIGBUS; + return VM_FAULT_SIGBUS; } - + /** Find offset in vma */ vma_offset = address - vma->vm_start; /** Which entry in the pagelist */ @@ -3468,15 +3471,17 @@ static __inline__ vm_nopage_ret_t do_vm_pcie_nopage(struct vm_area_struct* vma, if (page_address(pMmPage) == 0x0) { __KE_ERROR("Invalid page address\n"); - return NOPAGE_SIGBUS; + return VM_FAULT_SIGBUS; } - return pMmPage; + + vmf->page = pMmPage; + return 0; } -static __inline__ vm_nopage_ret_t do_vm_gart_nopage(struct vm_area_struct* vma, - unsigned long address) +static __inline__ int do_vm_gart_nopage(struct vm_area_struct* vma, + struct vm_fault *vmf) { - + unsigned long address = (unsigned long) vmf->virtual_address; unsigned long page_addr; unsigned long offset; struct page *page; @@ -3484,36 +3489,31 @@ static __inline__ vm_nopage_ret_t do_vm_gart_nopage(struct vm_area_struct* vma, if (address > vma->vm_end) { __KE_ERROR("Invalid virtual address\n"); - return NOPAGE_SIGBUS; /* Disallow mremap */ - } + return VM_FAULT_SIGBUS; /* Disallow mremap */ + } offset = address - vma->vm_start; -#ifdef FIREGL_CF_SUPPORT +#ifdef FIREGL_CF_SUPPORT page_addr = mc_heap_get_page_addr(vma, offset); #else page_addr = firegl_cmmqs_get_pageaddr_from_vm(vma, offset); -#endif +#endif if( !page_addr) { __KE_ERROR("Invalid page address\n"); - return NOPAGE_SIGBUS; /* Disallow mremap */ + return VM_FAULT_SIGBUS; /* Disallow mremap */ } page = virt_to_page(page_addr); get_page(page); - return page; + vmf->page = page; + return 0; } - - -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) - -static vm_nopage_ret_t vm_nopage(struct vm_area_struct* vma, - unsigned long address, - int *type) +static int vm_nopage(struct vm_area_struct* vma, + struct vm_fault *vmf) { - if (type) *type = VM_FAULT_MINOR; - return do_vm_nopage(vma, address); + return VM_FAULT_SIGBUS; } #ifdef __AGP__BUILTIN__ @@ -3545,91 +3545,10 @@ static vm_nopage_ret_t vm_cant_nopage(struct vm_area_struct* vma, (which is one or more pages in size) */ -static vm_nopage_ret_t vm_shm_nopage(struct vm_area_struct* vma, - unsigned long address, - int *type) -{ - if (type) *type = VM_FAULT_MINOR; - return do_vm_shm_nopage(vma, address); -} - -/* - - This routine is intended to remap addresses of a OpenGL context - (which is one ore more pages in size) - -*/ -static vm_nopage_ret_t vm_dma_nopage(struct vm_area_struct* vma, - unsigned long address, - int *type) -{ - if (type) *type = VM_FAULT_MINOR; - return do_vm_dma_nopage(vma, address); -} - -static vm_nopage_ret_t vm_kmap_nopage(struct vm_area_struct* vma, - unsigned long address, - int *type) -{ - if (type) *type = VM_FAULT_MINOR; - return do_vm_kmap_nopage(vma, address); -} - -static vm_nopage_ret_t vm_pcie_nopage(struct vm_area_struct* vma, - unsigned long address, - int *type) -{ - return do_vm_pcie_nopage(vma, address); -} - -static vm_nopage_ret_t vm_gart_nopage(struct vm_area_struct* vma, - unsigned long address, - int *type) -{ - return do_vm_gart_nopage(vma, address); -} - -#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */ - -static vm_nopage_ret_t vm_nopage(struct vm_area_struct* vma, - unsigned long address, - int write_access) -{ - return do_vm_nopage(vma, address); -} - -#ifdef __AGP__BUILTIN__ -#ifdef __ia64__ - - -static vm_nopage_ret_t vm_cant_nopage(struct vm_area_struct* vma, - unsigned long address, - int write_access) -{ - return do_vm_cant_nopage(vma, address); -} -#endif /* __ia64__ */ -#endif /* __AGP__BUILTIN__ */ - -/* - - This function is called when a page of a mmap()'ed area is not 
-  This function is called when a page of a mmap()'ed area is not currently
-  visible in the specified VMA.
-  Return value is the associated physical address for the requested page.
-  (If not implemented, then the kernel default routine would allocate a new,
-  zeroed page for servicing us)
-
-  Possible errors: SIGBUS, OutOfMem
-
-  This routine is intended to remap addresses of SHM SAREA
-  (which is one or more pages in size)
-
- */
-static vm_nopage_ret_t vm_shm_nopage(struct vm_area_struct* vma, 
-                                     unsigned long address, 
-                                     int write_access) 
+static int vm_shm_nopage(struct vm_area_struct* vma,
+                         struct vm_fault *vmf)
 {
-    return do_vm_shm_nopage(vma, address); 
+    return do_vm_shm_nopage(vma, vmf);
 }
 
 /*
 
   This routine is intended to remap addresses of a OpenGL context
   (which is one ore more pages in size)
 
 */
-static vm_nopage_ret_t vm_dma_nopage(struct vm_area_struct* vma, 
-                                     unsigned long address, 
-                                     int write_access) 
+static int vm_dma_nopage(struct vm_area_struct* vma,
+                         struct vm_fault *vmf)
 {
-    return do_vm_dma_nopage(vma, address); 
+    return do_vm_dma_nopage(vma, vmf);
 }
 
-static vm_nopage_ret_t vm_kmap_nopage(struct vm_area_struct* vma, 
-                                      unsigned long address, 
-                                      int write_access) 
+static int vm_kmap_nopage(struct vm_area_struct* vma,
+                          struct vm_fault *vmf)
 {
-    return do_vm_kmap_nopage(vma, address); 
+    return do_vm_kmap_nopage(vma, vmf);
 }
 
-static vm_nopage_ret_t vm_pcie_nopage(struct vm_area_struct* vma, 
-                                      unsigned long address, 
-                                      int write_access) 
+static int vm_pcie_nopage(struct vm_area_struct* vma,
+                          struct vm_fault *vmf)
 {
-    return do_vm_pcie_nopage(vma, address); 
+    return do_vm_pcie_nopage(vma, vmf);
 }
 
-static vm_nopage_ret_t vm_gart_nopage(struct vm_area_struct* vma, 
                                      unsigned long address, 
-                                      int *type) 
+static int vm_gart_nopage(struct vm_area_struct* vma,
+                          struct vm_fault *vmf)
 {
-    return do_vm_gart_nopage(vma, address); 
+    return do_vm_gart_nopage(vma, vmf);
 }
 
-
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) */
-
 void* ATI_API_CALL __ke_vma_file_priv(struct vm_area_struct* vma)
 {
     return vma->vm_file->private_data;
@@ -3718,7 +3630,7 @@ char* ATI_API_CALL __ke_vm_page_prot_str(struct vm_area_struct* vma, char* buf)
     *(buf + i++) = pgprot & _PAGE_DIRTY  ? 'd' : '-';
     *(buf + i++) = pgprot & _PAGE_PSE    ? 'm' : 'k';
     *(buf + i++) = pgprot & _PAGE_GLOBAL ? 'g' : 'l';
-#endif /* __i386__ */ 
+#endif /* __i386__ */
     *(buf + i++) = 0;
 
     return buf;
@@ -3747,9 +3659,9 @@ char *__ke_pte_phys_addr_str(pte_t pte, char *buf, __ke_dma_addr_t* phys_address
     return buf;
 }
 
-char* ATI_API_CALL __ke_vm_phys_addr_str(struct vm_area_struct* vma, 
-                                         char* buf, 
-                                         unsigned long virtual_addr, 
+char* ATI_API_CALL __ke_vm_phys_addr_str(struct vm_area_struct* vma,
+                                         char* buf,
+                                         unsigned long virtual_addr,
                                          __ke_dma_addr_t* phys_address)
 {
     pgd_t* pgd_p;
@@ -3773,7 +3685,7 @@ void ip_drm_vm_close(struct vm_area_struct* vma)
 
 static struct vm_operations_struct vm_ops =
 {
-    nopage: vm_nopage, 
+    fault:  vm_nopage,
     open:   ip_drm_vm_open,
     close:  ip_drm_vm_close,
 };
@@ -3791,42 +3703,42 @@ static struct vm_operations_struct vm_cant_ops =
 
 static struct vm_operations_struct vm_shm_ops =
 {
-    nopage: vm_shm_nopage, 
+    fault:  vm_shm_nopage,
     open:   ip_drm_vm_open,
     close:  ip_drm_vm_close,
 };
 
 static struct vm_operations_struct vm_pci_bq_ops =
 {
-    nopage: vm_dma_nopage, 
+    fault:  vm_dma_nopage,
     open:   ip_drm_vm_open,
     close:  ip_drm_vm_close,
 };
 
 static struct vm_operations_struct vm_ctx_ops =
 {
-    nopage: vm_dma_nopage, 
+    fault:  vm_dma_nopage,
     open:   ip_drm_vm_open,
     close:  ip_drm_vm_close,
 };
 
-static struct vm_operations_struct vm_pcie_ops = 
+static struct vm_operations_struct vm_pcie_ops =
 {
-    nopage: vm_pcie_nopage, 
+    fault:  vm_pcie_nopage,
     open:   ip_drm_vm_open,
     close:  ip_drm_vm_close,
 };
 
 static struct vm_operations_struct vm_kmap_ops =
 {
-    nopage: vm_kmap_nopage, 
+    fault:  vm_kmap_nopage,
     open:   ip_drm_vm_open,
     close:  ip_drm_vm_close,
 };
 
 static struct vm_operations_struct vm_gart_ops =
 {
-    nopage: vm_gart_nopage, 
+    fault:  vm_gart_nopage,
     open:   ip_drm_vm_open,
     close:  ip_drm_vm_close,
 };
@@ -3835,14 +3747,14 @@ static struct vm_operations_struct vm_gart_ops =
 #ifndef __ia64__
 static struct vm_operations_struct vm_agp_bq_ops =
 {
-    nopage: vm_nopage, 
+    fault:  vm_nopage,
     open:   ip_drm_vm_open,
     close:  ip_drm_vm_close,
 };
-#else 
+#else
 static struct vm_operations_struct vm_cant_agp_bq_ops =
 {
-    nopage: vm_cant_nopage, 
+    fault:  vm_cant_nopage,
     open:   ip_drm_vm_open,
     close:  ip_drm_vm_close,
 };
@@ -3877,19 +3789,19 @@ int ATI_API_CALL __ke_vm_map(struct file* filp,
 #ifdef __i386__
     if (boot_cpu_data.x86 > 3)
     {
-#ifdef FIREGL_USWC_SUPPORT 
+#ifdef FIREGL_USWC_SUPPORT
         if (!firegl_pat_enabled)
-#endif 
+#endif
        {
             pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
             pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
        }
-#ifdef FIREGL_USWC_SUPPORT 
+#ifdef FIREGL_USWC_SUPPORT
         else
         {
             vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-        } 
-#endif 
+        }
+#endif
     }
 #endif /* __i386__ */
 #ifdef __ia64__
@@ -3908,7 +3820,7 @@ int ATI_API_CALL __ke_vm_map(struct file* filp,
         }
         break;
 
-#ifdef FIREGL_USWC_SUPPORT 
+#ifdef FIREGL_USWC_SUPPORT
     case __KE_ADPT_REG:
         {
 #ifdef __ia64__
@@ -3928,7 +3840,7 @@ int ATI_API_CALL __ke_vm_map(struct file* filp,
             }
             else
             {
-                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 
+                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
             }
         }
 #endif /* __i386__ */
@@ -3947,7 +3859,7 @@ int ATI_API_CALL __ke_vm_map(struct file* filp,
             vma->vm_ops = &vm_ops;
         }
         break;
-#endif 
+#endif
 
     case __KE_SHM:
         vma->vm_flags |= VM_SHM | VM_RESERVED; /* Don't swap */
@@ -3981,7 +3893,7 @@ int ATI_API_CALL __ke_vm_map(struct file* filp,
 
 #ifdef __AGP__BUILTIN__
     case __KE_AGP:
-        // if(dev->agp->cant_use_aperture == 1) 
+        // if(dev->agp->cant_use_aperture == 1)
 #ifdef __ia64__
         {
             /*
@@ -4005,9 +3917,9 @@ int ATI_API_CALL __ke_vm_map(struct file* filp,
             if( firegl_pat_enabled )
             {
                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-            } 
+            }
         }
-#endif 
+#endif
 
         if (REMAP_PAGE_RANGE(vma,offset))
         {
@@ -4024,8 +3936,8 @@ int ATI_API_CALL __ke_vm_map(struct file* filp,
 #endif
         break;
 
     case __KE_AGP_BQS:
-        // if(dev->agp->cant_use_aperture == 1) 
-#ifdef __ia64__ 
+        // if(dev->agp->cant_use_aperture == 1)
+#ifdef __ia64__
         {
             /*
             * On some systems we can't talk to bus dma address from
@@ -4048,9 +3960,9 @@ int ATI_API_CALL __ke_vm_map(struct file* filp,
             if( firegl_pat_enabled )
            {
                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-            } 
+            }
         }
-#endif 
+#endif
 
         if (REMAP_PAGE_RANGE(vma,offset))
         {
@@ -4079,15 +3991,15 @@ int ATI_API_CALL __ke_vm_map(struct file* filp,
         break;
 
     case __KE_GART_USWC:
-#ifdef FIREGL_USWC_SUPPORT 
+#ifdef FIREGL_USWC_SUPPORT
        if (boot_cpu_data.x86 > 3)
        {
             if( firegl_pat_enabled )
            {
                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-            } 
+            }
        }
-#endif 
+#endif
         // fall through
     case __KE_GART_CACHEABLE:
         vma->vm_flags |= VM_RESERVED;
@@ -4137,7 +4049,7 @@ extern struct _agp_memory *_X(agp_allocate_memory_phys_list)(size_t, u32, u64 *)
 #define FIREGL_agp_backend_release  _X(agp_backend_release)
 #define FIREGL_agp_memory           _X(agp_memory)
 
-unsigned int __ke_firegl_agpgart_inuse = AGPGART_INUSE_NONE; 
+unsigned int __ke_firegl_agpgart_inuse = AGPGART_INUSE_NONE;
 
 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
 /*****************************************************************************/
@@ -4252,7 +4164,7 @@ static int ATI_API_CALL __ke_agpgart_available(__ke_pci_dev_t *pcidev, int use_internal)
 {
     drm_agp_module_stub = &drm_agp;
-    __ke_firegl_agpgart_inuse = KERNEL26_AGPGART_INUSE; 
+    __ke_firegl_agpgart_inuse = KERNEL26_AGPGART_INUSE;
 
     {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
         firegl_pci_device = (struct pci_dev*)(void*)pcidev;
@@ -4305,7 +4217,7 @@ int ATI_API_CALL __ke_agp_available(__ke_pci_dev_t *pcidev, int use_internal)
     } else {
         available = __ke_firegl_agpgart_available();
     }
-    
+
     return available;
 }
@@ -4410,7 +4322,7 @@ void ATI_API_CALL __ke_agp_copy_info(__ke_agp_kern_info_t* info)
 
     if (AGP_AVAILABLE(copy_info))
     {
-        struct agp_kern_info kern; 
+        struct agp_kern_info kern;
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
         if (firegl_agp_bridge == NULL)
@@ -4508,7 +4420,7 @@ int ATI_API_CALL __ke_agp_enable(unsigned long mode)
 
 int ATI_API_CALL __ke_read_agp_caps_registers(__ke_pci_dev_t* dev, unsigned int *caps)
 {
-    return -EINVAL; 
+    return -EINVAL;
 }
 
 int ATI_API_CALL __ke_agp_acquire(__ke_pci_dev_t* dev)
@@ -4550,9 +4462,9 @@ int ATI_API_CALL __ke_smp_processor_id(void)
 
 /** \brief Runs a function on all other CPUs
 * \param func_to_call function to be called on all other cpus
- * \return None 
+ * \return None
 */
-void ATI_API_CALL KCL_CallFuncOnOtherCpus(firegl_void_routine_t func_to_call) 
+void ATI_API_CALL KCL_CallFuncOnOtherCpus(firegl_void_routine_t func_to_call)
 {
 #ifdef CONFIG_SMP
     smp_call_function( firegl_smp_func_parameter_wrap, (void*)func_to_call, 0, 1 );
@@ -4659,7 +4571,7 @@ int ATI_API_CALL __ke_has_PSE(void)
 
 int ATI_API_CALL KCL_is_pat_enabled(void)
 {
-    return firegl_pat_enabled; 
+    return firegl_pat_enabled;
 }
 
 static int ATI_API_CALL KCL_has_pat(void)
@@ -4675,7 +4587,7 @@ static void KCL_setup_pat(void *info)
 {
     unsigned long cr0=0, cr4=0;
     unsigned long flags;
-    
+
     local_irq_save(flags);
     cr0 = read_cr0() | 0x40000000;
     write_cr0(cr0);
@@ -4706,7 +4618,7 @@ static void KCL_restore_pat(void *info)
 {
     unsigned long cr0 = 0, cr4 = 0;
     unsigned long flags;
-    
+
     local_irq_save(flags);
     cr0 = read_cr0() | 0x40000000;
     write_cr0(cr0);
@@ -4718,7 +4630,7 @@ static void KCL_restore_pat(void *info)
         write_cr4(cr4 & ~X86_CR4_PGE);
     }
     __flush_tlb();
-    
+
     wrmsr(MSR_IA32_CR_PAT, KCL_orig_pat[0], KCL_orig_pat[1]);
 
     cr0 = read_cr0();
@@ -4741,7 +4653,7 @@ static int ATI_API_CALL KCL_enable_pat(void)
         __KE_INFO("USWC is disabled in module parameters\n");
         return 0;
     }
-    
+
     if (!KCL_has_pat())
     {
         return 0;
@@ -4751,13 +4663,13 @@ static int ATI_API_CALL KCL_enable_pat(void)
 
     for ( i = 0; i < 2; i++ )
     {
-        for (j = 0; j < 4; j ++) 
+        for (j = 0; j < 4; j ++)
        {
            if (((KCL_orig_pat[i] >> (j * 8)) & 0xFF) == 1)
-            { 
+            {
                 __KE_ERROR("Pat entry %d is already configured\n", (i+1)*(j+1));
                 return 0;
-            } 
+            }
        }
     }
 
@@ -4829,7 +4741,7 @@ typedef struct tag_kasContext_t
 } kasContext_t;
 
 /** \brief KAS context */
-static kasContext_t kasContext; 
+static kasContext_t kasContext;
 
 /** \brief Kernel support required to enable KAS */
 #if defined(cmpxchg) && \
@@ -5137,7 +5049,7 @@ unsigned int ATI_API_CALL KAS_Ih_Execute(KAS_IhRoutine_t ih_routine,
     DBG_TRACE("Interrupt handler returned 0x%08X", ret);
 
     kasSetExecutionLevel(orig_level);
-    spin_unlock(&kasContext.lock_ih); 
+    spin_unlock(&kasContext.lock_ih);
 
     DBG_LEAVE("%d", ret);
     return ret;
@@ -5412,7 +5324,7 @@ typedef struct tag_kasSlabCache_t
 #endif
     spinlock_t lock;            /* OS spinlock object protecting the cache */
     unsigned int routine_type;  /* Type of routine the cache might be accessed from */
-    char name[14];              /* Cache object name (kernel 2.4 restricts its length to 19 chars) */
+    char name[24];              /* Cache object name, large enough for "kas(%p)" on 64-bit */
 } kasSlabCache_t;
 
 /** \brief Return Slab Cache object size
@@ -5450,7 +5362,8 @@ unsigned int ATI_API_CALL KAS_SlabCache_Initialize(void* hSlabCache,
 
     slabcache_obj->routine_type = access_type;
     spin_lock_init(&(slabcache_obj->lock));
-    sprintf(slabcache_obj->name, "kas(%08lX)",(unsigned long)slabcache_obj);
+    snprintf(slabcache_obj->name, sizeof(slabcache_obj->name),
+             "kas(%p)", slabcache_obj);
 
     DBG_TRACE("creating slab object '%s'", slabcache_obj->name);
 
@@ -6366,7 +6279,7 @@ unsigned int ATI_API_CALL KAS_AtomicAddInt(
 
     kas_xadd(puiDestination, iAdd, ret, "l");
 
-    return ret + iAdd; 
+    return ret + iAdd;
 #else
     return 0xDEADC0DE; /* To make compiler happy */
 #endif
@@ -6449,7 +6362,7 @@ long ATI_API_CALL KAS_ScheduleTimeout(long n_jiffies)
 
 #ifdef FIREGL_CF_SUPPORT
 void *ATI_API_CALL KCL_lock_init()
-{ 
+{
     spinlock_t *lock;
 
     lock = kmalloc(sizeof(*lock), GFP_KERNEL);
@@ -6461,7 +6374,7 @@ void *ATI_API_CALL KCL_lock_init()
 }
 
 void ATI_API_CALL KCL_lock_deinit(void *plock)
-{ 
+{
     if (plock == NULL)
     {
         __KE_ERROR("plock is NULL\n");