Go to:
Gentoo Home
Documentation
Forums
Lists
Bugs
Planet
Store
Wiki
Get Gentoo!
Gentoo's Bugzilla – Attachment 78606 Details for
Bug 100703
request for linux-headers-2.6.12.5
Home
|
New
–
[Ex]
|
Browse
|
Search
|
Privacy Policy
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
[x]
|
Forgot Password
Login:
[x]
[patch]
patch to allow building with gcc-4.2
gcc4-compile.patch (text/plain), 14.91 KB, created by
richard juckes
on 2006-01-31 17:35:34 UTC
(
hide
)
Description:
patch to allow building with gcc-4.2
Filename:
MIME Type:
Creator:
richard juckes
Created:
2006-01-31 17:35:34 UTC
Size:
14.91 KB
patch
obsolete
>diff -Nru orig-linux-2.6.15-rc6/include/linux/compiler.h linux-2.6.15-rc6/include/linux/compiler.h >--- orig-linux-2.6.15-rc6/include/linux/compiler.h 2006-01-31 15:41:23.000000000 +0800 >+++ linux-2.6.15-rc6/include/linux/compiler.h 2006-01-31 15:42:43.000000000 +0800 >@@ -34,14 +34,7 @@ > # define __cond_lock(x) (x) > #endif > >-#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1) >-#ifndef asm >-# define asm __asm__ >-#endif >-#ifndef volatile >-# define volatile __volatile__ >-#endif >-#endif >+#ifdef __KERNEL__ > > #if __GNUC__ > 4 > #error no compiler-gcc.h file for this gcc version >@@ -49,8 +42,6 @@ > # include <linux/compiler-gcc4.h> > #elif __GNUC__ == 3 > # include <linux/compiler-gcc3.h> >-#elif __GNUC__ == 2 >-# include <linux/compiler-gcc2.h> > #else > # error Sorry, your compiler is too old/not recognized. > #endif >@@ -73,11 +64,7 @@ > > /* Optimization barrier */ > #ifndef barrier >-# ifdef mb >-# define barrier() mb() >-# else >-# define barrier() __asm__ __volatile__ ("" : : : "memory") >-# endif >+# define barrier() __memory_barrier() > #endif > > #ifndef RELOC_HIDE >@@ -87,6 +74,8 @@ > (typeof(ptr)) (__ptr + (off)); }) > #endif > >+#endif /* __KERNEL__ */ >+ > #endif /* __ASSEMBLY__ */ > > /* >@@ -156,14 +145,12 @@ > # define __attribute_const__ /* unimplemented */ > #endif > >-#ifndef __always_inline >-#define __always_inline inline >+#ifndef noinline >+#define noinline > #endif > >-#ifdef __cplusplus >-#define __cast__(_to) (_to) >-#else >-#define __cast__(_to) >+#ifndef __always_inline >+#define __always_inline inline > #endif > > #endif /* __LINUX_COMPILER_H */ > > >diff -Nru orig-linux-2.6.15-rc6/include/linux/prefetch.h linux-2.6.15-rc6/include/linux/prefetch.h >--- orig-linux-2.6.15-rc6/include/linux/prefetch.h 2006-01-31 15:41:23.000000000 +0800 >+++ linux-2.6.15-rc6/include/linux/prefetch.h 2006-01-31 15:42:43.000000000 +0800 >@@ -10,7 +10,6 @@ > #ifndef _LINUX_PREFETCH_H > #define _LINUX_PREFETCH_H > >-#include 
<linux/compiler.h> > #include <linux/types.h> > #include <asm/processor.h> > #include <asm/cache.h> >@@ -41,11 +40,11 @@ > */ > > #ifndef ARCH_HAS_PREFETCH >-static __inline__ void prefetch(const void *x) {;} >+static inline void prefetch(const void *x) {;} > #endif > > #ifndef ARCH_HAS_PREFETCHW >-static __inline__ void prefetchw(const void *x) {;} >+static inline void prefetchw(const void *x) {;} > #endif > > #ifndef ARCH_HAS_SPINLOCK_PREFETCH >@@ -56,13 +55,13 @@ > #define PREFETCH_STRIDE (4*L1_CACHE_BYTES) > #endif > >-static __inline__ void prefetch_range(void *addr, size_t len) >+static inline void prefetch_range(void *addr, size_t len) > { > #ifdef ARCH_HAS_PREFETCH > char *cp; >- char *end = __cast__(char *) addr + len; >+ char *end = addr + len; > >- for (cp = __cast__(char *) addr; cp < end; cp += PREFETCH_STRIDE) >+ for (cp = addr; cp < end; cp += PREFETCH_STRIDE) > prefetch(cp); > #endif > } > > >diff -Nru orig-linux-2.6.15-rc6/include/linux/list.h linux-2.6.15-rc6/include/linux/list.h >--- orig-linux-2.6.15-rc6/include/linux/list.h 2006-01-31 15:41:23.000000000 +0800 >+++ linux-2.6.15-rc6/include/linux/list.h 2006-01-31 15:42:43.000000000 +0800 >@@ -1,6 +1,8 @@ > #ifndef _LINUX_LIST_H > #define _LINUX_LIST_H > >+#ifdef __KERNEL__ >+ > #include <linux/stddef.h> > #include <linux/prefetch.h> > #include <asm/system.h> >@@ -36,39 +38,20 @@ > (ptr)->next = (ptr); (ptr)->prev = (ptr); \ > } while (0) > >-struct hlist_head { >- struct hlist_node *first; >-}; >- >-struct hlist_node { >- struct hlist_node *next, **pprev; >-}; >- >-/** >- * list_empty - tests whether a list is empty >- * @head: the list to test. >- */ >-static __inline__ int list_empty(const struct list_head *head) >-{ >- return head->next == head; >-} >- >-#if defined(__KERNEL__) || defined(__LINUX_KEYBOARD_H) >- > /* > * Insert a new entry between two known consecutive entries. > * > * This is only for internal list manipulation where we know > * the prev/next entries already! 
> */ >-static __inline__ void __list_add(struct list_head *_new, >+static inline void __list_add(struct list_head *new, > struct list_head *prev, > struct list_head *next) > { >- next->prev = _new; >- _new->next = next; >- _new->prev = prev; >- prev->next = _new; >+ next->prev = new; >+ new->next = next; >+ new->prev = prev; >+ prev->next = new; > } > > /** >@@ -79,9 +62,9 @@ > * Insert a new entry after the specified head. > * This is good for implementing stacks. > */ >-static __inline__ void list_add(struct list_head *_new, struct list_head *head) >+static inline void list_add(struct list_head *new, struct list_head *head) > { >- __list_add(_new, head, head->next); >+ __list_add(new, head, head->next); > } > > /** >@@ -92,9 +75,9 @@ > * Insert a new entry before the specified head. > * This is useful for implementing queues. > */ >-static __inline__ void list_add_tail(struct list_head *_new, struct list_head *head) >+static inline void list_add_tail(struct list_head *new, struct list_head *head) > { >- __list_add(_new, head->prev, head); >+ __list_add(new, head->prev, head); > } > > /* >@@ -103,14 +86,14 @@ > * This is only for internal list manipulation where we know > * the prev/next entries already! > */ >-static __inline__ void __list_add_rcu(struct list_head * _new, >+static inline void __list_add_rcu(struct list_head * new, > struct list_head * prev, struct list_head * next) > { >- _new->next = next; >- _new->prev = prev; >+ new->next = next; >+ new->prev = prev; > smp_wmb(); >- next->prev = _new; >- prev->next = _new; >+ next->prev = new; >+ prev->next = new; > } > > /** >@@ -129,9 +112,9 @@ > * the _rcu list-traversal primitives, such as > * list_for_each_entry_rcu(). 
> */ >-static __inline__ void list_add_rcu(struct list_head *_new, struct list_head *head) >+static inline void list_add_rcu(struct list_head *new, struct list_head *head) > { >- __list_add_rcu(_new, head, head->next); >+ __list_add_rcu(new, head, head->next); > } > > /** >@@ -150,10 +133,10 @@ > * the _rcu list-traversal primitives, such as > * list_for_each_entry_rcu(). > */ >-static __inline__ void list_add_tail_rcu(struct list_head *_new, >+static inline void list_add_tail_rcu(struct list_head *new, > struct list_head *head) > { >- __list_add_rcu(_new, head->prev, head); >+ __list_add_rcu(new, head->prev, head); > } > > /* >@@ -163,7 +146,7 @@ > * This is only for internal list manipulation where we know > * the prev/next entries already! > */ >-static __inline__ void __list_del(struct list_head * prev, struct list_head * next) >+static inline void __list_del(struct list_head * prev, struct list_head * next) > { > next->prev = prev; > prev->next = next; >@@ -175,11 +158,11 @@ > * Note: list_empty on entry does not return true after this, the entry is > * in an undefined state. > */ >-static __inline__ void list_del(struct list_head *entry) >+static inline void list_del(struct list_head *entry) > { > __list_del(entry->prev, entry->next); >- entry->next = __cast__(list_head*) LIST_POISON1; >- entry->prev = __cast__(list_head*) LIST_POISON2; >+ entry->next = LIST_POISON1; >+ entry->prev = LIST_POISON2; > } > > /** >@@ -206,10 +189,10 @@ > * or call_rcu() must be used to defer freeing until an RCU > * grace period has elapsed. > */ >-static __inline__ void list_del_rcu(struct list_head *entry) >+static inline void list_del_rcu(struct list_head *entry) > { > __list_del(entry->prev, entry->next); >- entry->prev = __cast__(list_head*) LIST_POISON2; >+ entry->prev = LIST_POISON2; > } > > /* >@@ -219,14 +202,14 @@ > * > * The old entry will be replaced with the new entry atomically. 
> */ >-static __inline__ void list_replace_rcu(struct list_head *old, >- struct list_head *_new) >+static inline void list_replace_rcu(struct list_head *old, >+ struct list_head *new) > { >- _new->next = old->next; >- _new->prev = old->prev; >+ new->next = old->next; >+ new->prev = old->prev; > smp_wmb(); >- _new->next->prev = _new; >- _new->prev->next = _new; >+ new->next->prev = new; >+ new->prev->next = new; > old->prev = LIST_POISON2; > } > >@@ -234,7 +217,7 @@ > * list_del_init - deletes entry from list and reinitialize it. > * @entry: the element to delete from the list. > */ >-static __inline__ void list_del_init(struct list_head *entry) >+static inline void list_del_init(struct list_head *entry) > { > __list_del(entry->prev, entry->next); > INIT_LIST_HEAD(entry); >@@ -245,7 +228,7 @@ > * @list: the entry to move > * @head: the head that will precede our entry > */ >-static __inline__ void list_move(struct list_head *list, struct list_head *head) >+static inline void list_move(struct list_head *list, struct list_head *head) > { > __list_del(list->prev, list->next); > list_add(list, head); >@@ -256,7 +239,7 @@ > * @list: the entry to move > * @head: the head that will follow our entry > */ >-static __inline__ void list_move_tail(struct list_head *list, >+static inline void list_move_tail(struct list_head *list, > struct list_head *head) > { > __list_del(list->prev, list->next); >@@ -264,6 +247,15 @@ > } > > /** >+ * list_empty - tests whether a list is empty >+ * @head: the list to test. >+ */ >+static inline int list_empty(const struct list_head *head) >+{ >+ return head->next == head; >+} >+ >+/** > * list_empty_careful - tests whether a list is > * empty _and_ checks that no other CPU might be > * in the process of still modifying either member >@@ -275,13 +267,13 @@ > * > * @head: the list to test. 
> */ >-static __inline__ int list_empty_careful(const struct list_head *head) >+static inline int list_empty_careful(const struct list_head *head) > { > struct list_head *next = head->next; > return (next == head) && (next == head->prev); > } > >-static __inline__ void __list_splice(struct list_head *list, >+static inline void __list_splice(struct list_head *list, > struct list_head *head) > { > struct list_head *first = list->next; >@@ -300,7 +292,7 @@ > * @list: the new list to add. > * @head: the place to add it in the first list. > */ >-static __inline__ void list_splice(struct list_head *list, struct list_head *head) >+static inline void list_splice(struct list_head *list, struct list_head *head) > { > if (!list_empty(list)) > __list_splice(list, head); >@@ -313,7 +305,7 @@ > * > * The list at @list is reinitialised > */ >-static __inline__ void list_splice_init(struct list_head *list, >+static inline void list_splice_init(struct list_head *list, > struct list_head *head) > { > if (!list_empty(list)) { >@@ -444,6 +436,20 @@ > pos = n, n = list_entry(n->member.next, typeof(*n), member)) > > /** >+ * list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against >+ * removal of list entry >+ * @pos: the type * to use as a loop counter. >+ * @n: another type * to use as temporary storage >+ * @head: the head for your list. >+ * @member: the name of the list_struct within the struct. >+ */ >+#define list_for_each_entry_safe_reverse(pos, n, head, member) \ >+ for (pos = list_entry((head)->prev, typeof(*pos), member), \ >+ n = list_entry(pos->member.prev, typeof(*pos), member); \ >+ &pos->member != (head); \ >+ pos = n, n = list_entry(n->member.prev, typeof(*n), member)) >+ >+/** > * list_for_each_rcu - iterate over an rcu-protected list > * @pos: the &struct list_head to use as a loop counter. > * @head: the head for your list. >@@ -517,35 +523,43 @@ > * You lose the ability to access the tail in O(1). 
> */ > >+struct hlist_head { >+ struct hlist_node *first; >+}; >+ >+struct hlist_node { >+ struct hlist_node *next, **pprev; >+}; >+ > #define HLIST_HEAD_INIT { .first = NULL } > #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } > #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) > #define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL) > >-static __inline__ int hlist_unhashed(const struct hlist_node *h) >+static inline int hlist_unhashed(const struct hlist_node *h) > { > return !h->pprev; > } > >-static __inline__ int hlist_empty(const struct hlist_head *h) >+static inline int hlist_empty(const struct hlist_head *h) > { > return !h->first; > } > >-static __inline__ void __hlist_del(struct hlist_node *n) >+static inline void __hlist_del(struct hlist_node *n) > { >- struct hlist_node *next = __cast__(hlist_node*) n->next; >- struct hlist_node **pprev = __cast__(hlist_node**) n->pprev; >+ struct hlist_node *next = n->next; >+ struct hlist_node **pprev = n->pprev; > *pprev = next; > if (next) > next->pprev = pprev; > } > >-static __inline__ void hlist_del(struct hlist_node *n) >+static inline void hlist_del(struct hlist_node *n) > { > __hlist_del(n); >- n->next = __cast__(hlist_node*) LIST_POISON1; >- n->pprev = __cast__(hlist_node**) LIST_POISON2; >+ n->next = LIST_POISON1; >+ n->pprev = LIST_POISON2; > } > > /** >@@ -567,13 +581,13 @@ > * the _rcu list-traversal primitives, such as > * hlist_for_each_entry(). > */ >-static __inline__ void hlist_del_rcu(struct hlist_node *n) >+static inline void hlist_del_rcu(struct hlist_node *n) > { > __hlist_del(n); >- n->pprev = __cast__(hlist_node**) LIST_POISON2; >+ n->pprev = LIST_POISON2; > } > >-static __inline__ void hlist_del_init(struct hlist_node *n) >+static inline void hlist_del_init(struct hlist_node *n) > { > if (n->pprev) { > __hlist_del(n); >@@ -588,7 +602,7 @@ > * > * The old entry will be replaced with the new entry atomically. 
> */ >-static __inline__ void hlist_replace_rcu(struct hlist_node *old, >+static inline void hlist_replace_rcu(struct hlist_node *old, > struct hlist_node *new) > { > struct hlist_node *next = old->next; >@@ -602,7 +616,7 @@ > old->pprev = LIST_POISON2; > } > >-static __inline__ void hlist_add_head(struct hlist_node *n, struct hlist_head *h) >+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) > { > struct hlist_node *first = h->first; > n->next = first; >@@ -629,7 +643,7 @@ > * problems on Alpha CPUs. Regardless of the type of CPU, the > * list-traversal primitive must be guarded by rcu_read_lock(). > */ >-static __inline__ void hlist_add_head_rcu(struct hlist_node *n, >+static inline void hlist_add_head_rcu(struct hlist_node *n, > struct hlist_head *h) > { > struct hlist_node *first = h->first; >@@ -642,7 +656,7 @@ > } > > /* next must be != NULL */ >-static __inline__ void hlist_add_before(struct hlist_node *n, >+static inline void hlist_add_before(struct hlist_node *n, > struct hlist_node *next) > { > n->pprev = next->pprev; >@@ -651,7 +665,7 @@ > *(n->pprev) = n; > } > >-static __inline__ void hlist_add_after(struct hlist_node *n, >+static inline void hlist_add_after(struct hlist_node *n, > struct hlist_node *next) > { > next->next = n->next; >@@ -677,7 +691,7 @@ > * hlist_for_each_entry_rcu(), used to prevent memory-consistency > * problems on Alpha CPUs. > */ >-static __inline__ void hlist_add_before_rcu(struct hlist_node *n, >+static inline void hlist_add_before_rcu(struct hlist_node *n, > struct hlist_node *next) > { > n->pprev = next->pprev; >@@ -702,7 +716,7 @@ > * hlist_for_each_entry_rcu(), used to prevent memory-consistency > * problems on Alpha CPUs. 
> */ >-static __inline__ void hlist_add_after_rcu(struct hlist_node *prev, >+static inline void hlist_add_after_rcu(struct hlist_node *prev, > struct hlist_node *n) > { > n->next = prev->next; >@@ -790,5 +804,7 @@ > ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ > pos = pos->next) > >-#endif >+#else >+#warning "don't include kernel headers in userspace" >+#endif /* __KERNEL__ */ > #endif
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 100703
:
78606
|
78689