Gentoo Websites Logo
Go to: Gentoo Home Documentation Forums Lists Bugs Planet Store Wiki Get Gentoo!
View | Details | Raw Unified | Return to bug 196820 | Differences between the original file and this patch

Collapse All | Expand All

(-)common/lib/modules/fglrx/build_mod/firegl_public.c~ (-48 / +116 lines)
Lines 796-802 Link Here
796
        
796
        
797
    // since privdev->pcidev is acquired in X server, use pdev 
797
    // since privdev->pcidev is acquired in X server, use pdev 
798
    // directly here to allow suspend/resume without X server start. 
798
    // directly here to allow suspend/resume without X server start. 
799
        firegl_pci_save_state(pdev, privdev);
799
        firegl_pci_save_state((__ke_pci_dev_t*)pdev, privdev);
800
        pci_disable_device(pdev);
800
        pci_disable_device(pdev);
801
        PMSG_EVENT(pdev->dev.power.power_state) = state;
801
        PMSG_EVENT(pdev->dev.power.power_state) = state;
802
    }
802
    }
Lines 838-844 Link Here
838
838
839
    // PCI config space needs to be restored very early, in particular
839
    // PCI config space needs to be restored very early, in particular
840
    // before pci_set_master!
840
    // before pci_set_master!
841
    firegl_pci_restore_state(pdev, privdev);
841
    firegl_pci_restore_state((__ke_pci_dev_t*)pdev, privdev);
842
842
843
    if (pci_enable_device(pdev)) 
843
    if (pci_enable_device(pdev)) 
844
    {
844
    {
Lines 2016-2022 Link Here
2016
2016
2017
__ke_pci_dev_t* ATI_API_CALL __ke_pci_find_device (unsigned int vendor, unsigned int dev, __ke_pci_dev_t* from)
2017
__ke_pci_dev_t* ATI_API_CALL __ke_pci_find_device (unsigned int vendor, unsigned int dev, __ke_pci_dev_t* from)
2018
{
2018
{
2019
	return (__ke_pci_dev_t*)pci_find_device( vendor, dev, (struct pci_dev *)(void *)from );
2019
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
2020
    return (__ke_pci_dev_t*)pci_get_device( vendor, dev, (struct pci_dev *)(void *)from );
2021
#else
2022
    return (__ke_pci_dev_t*)pci_find_device( vendor, dev, (struct pci_dev *)(void *)from );
2023
#endif
2020
}
2024
}
2021
2025
2022
void* ATI_API_CALL __ke_malloc(__ke_size_t size)
2026
void* ATI_API_CALL __ke_malloc(__ke_size_t size)
Lines 2487-2502 Link Here
2487
}
2491
}
2488
2492
2489
#ifndef ptep_clear_flush_dirty
2493
#ifndef ptep_clear_flush_dirty
2490
#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
2494
/** \brief Test and clear the "dirty" bit in the page table entry
2491
({							 \
2495
 *
2492
    int __dirty = ptep_test_and_clear_dirty(__ptep);	 \
2496
 * \param vma Pointer to the memory region structure
2493
    if (__dirty)					 \
2497
 * \param addr Virtual address covered by vma
2494
        flush_tlb_page(__vma, __address);		 \
2498
 * \param ptep Pointer to the table entry structure
2495
    __dirty;						 \
2499
 *
2496
})
2500
 * \return Old value of the "dirty" flag
2501
 *
2502
 */
2503
static inline int ptep_clear_flush_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
2504
{
2505
    int ret = 0;
2506
    
2507
    DBG_ENTER("0x%08X, 0x%08X, 0x%08X->0x%08X", vma, addr, ptep, *ptep);
2508
    
2509
    if (pte_dirty(*ptep))
2510
    {
2511
#ifdef __x86_64__
2512
        DBG_TRACE("Test and clear bit %d in 0x%08X", _PAGE_BIT_DIRTY, ptep->pte);
2513
        ret = test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
2514
#else
2515
        DBG_TRACE("Test and clear bit %d in 0x%08X", _PAGE_BIT_DIRTY, ptep->pte_low);
2516
        ret = test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
2517
2518
        // Since we modify PTE directly, it needs to inform the hypervisor
2519
        if (ret)
2520
        {
2521
            pte_update(vma->vm_mm, addr, ptep);
2522
        }
2523
#endif        
2524
    }
2525
2526
    DBG_TRACE("0x%08X->0x%08X", ptep, *ptep);
2527
    
2528
    // Flush Translation Lookaside Buffers
2529
    if (ret)
2530
    {
2531
        flush_tlb_page(vma, addr);
2532
    }
2533
    
2534
    DBG_LEAVE("%d", ret);
2535
    
2536
    return ret;
2537
}
2538
#endif
2539
2540
#ifdef pte_offset_atomic
2541
#define PTE_OFFSET_FUNC pte_offset_atomic
2542
#define PTE_UNMAP_FUNC(p) pte_kunmap(p)
2543
#else
2544
#ifdef pte_offset_map
2545
#define PTE_OFFSET_FUNC pte_offset_map
2546
#define PTE_UNMAP_FUNC(p) pte_unmap(p)
2547
#else
2548
#ifdef pte_offset_kernel
2549
#define PTE_OFFSET_FUNC pte_offset_kernel
2550
#define PTE_UNMAP_FUNC(p) do {} while (0)
2551
#else
2552
#define PTE_OFFSET_FUNC pte_offset
2553
#define PTE_UNMAP_FUNC(p) do {} while (0)
2554
#endif
2555
#endif
2497
#endif
2556
#endif
2498
2557
2499
int ATI_API_CALL __ke_vm_test_and_clear_dirty(struct mm_struct* mm, unsigned long virtual_addr)
2558
/** \brief Test and clear the "dirty" bit in the page table entry referred by
2559
 * the virtual address
2560
 *
2561
 * \param mm Pointer to the memory descriptor structure
2562
 * \param virtual_addr Virtual address 
2563
 *
2564
 * \return Old value of the "dirty" flag on success or negative on error
2565
 *
2566
 */
2567
int ATI_API_CALL KCL_TestAndClearPageDirtyFlag(struct mm_struct* mm, unsigned long virtual_addr)
2500
{
2568
{
2501
    int ret = -1; // init with page not present
2569
    int ret = -1; // init with page not present
2502
    pgd_t* pgd_p;
2570
    pgd_t* pgd_p;
Lines 2530-2566 Link Here
2530
    }
2598
    }
2531
    __KE_DEBUG("pmd_p=0x%08lx\n", (unsigned long)pmd_p);
2599
    __KE_DEBUG("pmd_p=0x%08lx\n", (unsigned long)pmd_p);
2532
2600
2533
#ifdef pte_offset_atomic
2601
    pte_p = PTE_OFFSET_FUNC(pmd_p, virtual_addr);
2534
    pte_p = pte_offset_atomic(pmd_p, virtual_addr);
2535
    if (pte_present(*pte_p))
2536
        ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
2537
    else
2538
        __KE_DEBUG("page not exists!\n");
2539
    pte_kunmap(pte_p);
2540
#else
2541
#ifdef pte_offset_map
2542
    pte_p = pte_offset_map(pmd_p, virtual_addr);
2543
    if (pte_present(*pte_p))
2544
        ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
2545
    else
2546
        __KE_DEBUG("page not exists!\n");
2547
    pte_unmap(pte_p);
2548
#else
2549
#ifdef pte_offset_kernel
2550
    pte_p = pte_offset_kernel(pmd_p, virtual_addr);
2551
    if (pte_present(*pte_p))
2552
        ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
2553
    else
2554
        __KE_DEBUG("page not exists!\n");
2555
#else
2556
    pte_p = pte_offset(pmd_p, virtual_addr);
2557
    if (pte_present(*pte_p))
2602
    if (pte_present(*pte_p))
2603
    {
2558
        ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
2604
        ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
2605
    }
2559
    else
2606
    else
2607
    {
2560
        __KE_DEBUG("page not exists!\n");
2608
        __KE_DEBUG("page not exists!\n");
2561
#endif
2609
    }
2562
#endif
2610
    PTE_UNMAP_FUNC(pte_p);
2563
#endif
2564
2611
2565
    if (debuglevel > 2)
2612
    if (debuglevel > 2)
2566
    {
2613
    {
Lines 2946-2965 Link Here
2946
#else
2993
#else
2947
static void ATI_API_CALL (*irq_handler_func)(int, void*, void*); /* function pointer variable */
2994
static void ATI_API_CALL (*irq_handler_func)(int, void*, void*); /* function pointer variable */
2948
2995
2996
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
2949
static irqreturn_t ke_irq_handler_wrap(int irq, void *arg1, struct pt_regs *regs)
2997
static irqreturn_t ke_irq_handler_wrap(int irq, void *arg1, struct pt_regs *regs)
2950
{
2998
{
2951
    irq_handler_func(irq, arg1, regs);
2999
    irq_handler_func(irq, arg1, regs);
2952
    return IRQ_HANDLED;
3000
    return IRQ_HANDLED;
2953
}
3001
}
2954
3002
#else
2955
int ATI_API_CALL __ke_request_irq(unsigned int irq, 
3003
static irqreturn_t ke_irq_handler_wrap(int irq, void *arg1)
3004
{
3005
    irq_handler_func(irq, arg1, (void *)0);
3006
    return IRQ_HANDLED;
3007
}
3008
#endif
3009
            
3010
int ATI_API_CALL __ke_request_irq(unsigned int irq,
2956
    void (*ATI_API_CALL handler)(int, void *, void *),
3011
    void (*ATI_API_CALL handler)(int, void *, void *),
2957
    const char *dev_name, void *dev_id)
3012
    const char *dev_name, void *dev_id)
2958
{
3013
{
2959
    irq_handler_func = handler;
3014
    irq_handler_func = handler;
2960
    return request_irq(irq,
3015
    return request_irq(
3016
        irq,
2961
        ke_irq_handler_wrap,
3017
        ke_irq_handler_wrap,
2962
        SA_SHIRQ, dev_name, dev_id);
3018
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
3019
        SA_SHIRQ,
3020
#else
3021
        IRQF_SHARED,
3022
#endif                
3023
        dev_name,
3024
        dev_id);
2963
}
3025
}
2964
3026
2965
void ATI_API_CALL __ke_free_irq(unsigned int irq, void *dev_id)
3027
void ATI_API_CALL __ke_free_irq(unsigned int irq, void *dev_id)
Lines 3530-3541 Link Here
3530
#else
3592
#else
3531
        *phys_address = pte_val(pte) & (u64)((u64)PAGE_MASK | (u64)0xf<<32);
3593
        *phys_address = pte_val(pte) & (u64)((u64)PAGE_MASK | (u64)0xf<<32);
3532
#endif
3594
#endif
3533
        sprintf(buf, "0x%Lx %c%c%c%c%c%c\n",
3595
        sprintf(buf, "0x%Lx %c%c%c%c\n",
3534
           *phys_address,
3596
           *phys_address,
3535
           pte_present (pte) ? 'p' : '-',
3597
           pte_present (pte) ? 'p' : '-',
3536
           pte_read    (pte) ? 'r' : '-',
3537
           pte_write   (pte) ? 'w' : '-',
3598
           pte_write   (pte) ? 'w' : '-',
3538
           pte_exec    (pte) ? 'x' : '-',
3539
           pte_dirty   (pte) ? 'd' : '-',
3599
           pte_dirty   (pte) ? 'd' : '-',
3540
           pte_young   (pte) ? 'a' : '-');
3600
           pte_young   (pte) ? 'a' : '-');
3541
    }
3601
    }
Lines 5436-5442 Link Here
5436
/** \brief Type definition of the structure describing Slab Cache object */
5496
/** \brief Type definition of the structure describing Slab Cache object */
5437
typedef struct tag_kasSlabCache_t
5497
typedef struct tag_kasSlabCache_t
5438
{
5498
{
5439
    kmem_cache_t* cache;        /* OS slab cache object */
5499
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
5500
    struct kmem_cache *cache;   /* OS slab cache object */
5501
#else
5502
    kmem_cache_t *cache;        /* OS slab cache object */
5503
#endif
5440
    spinlock_t lock;            /* OS spinlock object protecting the cache */
5504
    spinlock_t lock;            /* OS spinlock object protecting the cache */
5441
    unsigned int routine_type;  /* Type of routine the cache might be accessed from */
5505
    unsigned int routine_type;  /* Type of routine the cache might be accessed from */
5442
    char name[14];              /* Cache object name (kernel 2.4 restricts its length to 19 chars) */
5506
    char name[14];              /* Cache object name (kernel 2.4 restricts its length to 19 chars) */
Lines 5482-5489 Link Here
5482
    DBG_TRACE("creating slab object '%s'", slabcache_obj->name);
5546
    DBG_TRACE("creating slab object '%s'", slabcache_obj->name);
5483
5547
5484
    if ((slabcache_obj->cache =
5548
    if ((slabcache_obj->cache =
5485
            kmem_cache_create(slabcache_obj->name, iEntrySize, 0, 0, NULL, NULL)))
5549
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
5486
    {
5550
         kmem_cache_create(slabcache_obj->name, iEntrySize, 0, 0, NULL, NULL)))
5551
#else
5552
         kmem_cache_create(slabcache_obj->name, iEntrySize, 0, 0, NULL)))
5553
#endif
5554
{
5487
        ret = 1;
5555
        ret = 1;
5488
    }
5556
    }
5489
5557
(-)common/lib/modules/fglrx/build_mod/firegl_public.h~ (-2 / +7 lines)
Lines 241-249 Link Here
241
/*****************************************************************************/
241
/*****************************************************************************/
242
242
243
typedef unsigned long __ke_dev_t;
243
typedef unsigned long __ke_dev_t;
244
typedef unsigned long __ke_size_t;
245
typedef unsigned long __ke_off_t;
244
typedef unsigned long __ke_off_t;
245
#ifdef __x86_64__
246
typedef long __ke_ssize_t;
246
typedef long __ke_ssize_t;
247
typedef unsigned long __ke_size_t;
248
#else
249
typedef int __ke_ssize_t;
250
typedef unsigned int __ke_size_t;
251
#endif
247
typedef unsigned char __ke_u8;
252
typedef unsigned char __ke_u8;
248
typedef unsigned short __ke_u16;
253
typedef unsigned short __ke_u16;
249
typedef unsigned int __ke_u32;
254
typedef unsigned int __ke_u32;
Lines 594-600 Link Here
594
extern char* ATI_API_CALL __ke_strchr(const char *s, int c);
599
extern char* ATI_API_CALL __ke_strchr(const char *s, int c);
595
extern int ATI_API_CALL __ke_sprintf(char* buf, const char* fmt, ...);
600
extern int ATI_API_CALL __ke_sprintf(char* buf, const char* fmt, ...);
596
extern int ATI_API_CALL __ke_snprintf(char* buf, size_t size, const char* fmt, ...);
601
extern int ATI_API_CALL __ke_snprintf(char* buf, size_t size, const char* fmt, ...);
597
extern int ATI_API_CALL __ke_vm_test_and_clear_dirty(struct mm_struct* mm, unsigned long virtual_addr);
602
extern int ATI_API_CALL KCL_TestAndClearPageDirtyFlag(struct mm_struct* mm, unsigned long virtual_addr);
598
extern unsigned long ATI_API_CALL __ke_do_mmap(struct file * file, unsigned long addr, unsigned long len, unsigned long pgoff);
603
extern unsigned long ATI_API_CALL __ke_do_mmap(struct file * file, unsigned long addr, unsigned long len, unsigned long pgoff);
599
extern int ATI_API_CALL __ke_do_munmap(unsigned long addr, unsigned long len);
604
extern int ATI_API_CALL __ke_do_munmap(unsigned long addr, unsigned long len);
600
extern void* ATI_API_CALL __ke_vmap(unsigned long *pagelist, unsigned int count);
605
extern void* ATI_API_CALL __ke_vmap(unsigned long *pagelist, unsigned int count);

Return to bug 196820