Patch for Gentoo bug 21269

(-)a/include/linux/jhash.h (+161 lines)
Added
#ifndef _LINUX_JHASH_H
#define _LINUX_JHASH_H

/* jhash.h: Jenkins hash support.
 *
 * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
 *
 * http://burtleburtle.net/bob/hash/
 *
 * These are the credits from Bob's sources:
 *
 * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
 * hash(), hash2(), hash3, and mix() are externally useful functions.
 * Routines to test the hash are included if SELF_TEST is defined.
 * You can use this free for any purpose.  It has no warranty.
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 *
 * I've modified Bob's hash to be useful in the Linux kernel, and
 * any bugs present are surely my fault.  -DaveM
 */

/* NOTE: Arguments are modified. */
#define __jhash_mix(a, b, c) \
{ \
  a -= b; a -= c; a ^= (c>>13); \
  b -= c; b -= a; b ^= (a<<8); \
  c -= a; c -= b; c ^= (b>>13); \
  a -= b; a -= c; a ^= (c>>12); \
  b -= c; b -= a; b ^= (a<<16); \
  c -= a; c -= b; c ^= (b>>5); \
  a -= b; a -= c; a ^= (c>>3); \
  b -= c; b -= a; b ^= (a<<10); \
  c -= a; c -= b; c ^= (b>>15); \
}

/* The golden ratio: an arbitrary value */
#define JHASH_GOLDEN_RATIO	0x9e3779b9

/* The most generic version, hashes an arbitrary sequence
 * of bytes.  No alignment or length assumptions are made about
 * the input key.
 */
static __inline__ u32 jenkins_hash(void *key, u32 length, u32 initval)
{
	u32 a, b, c, len;
	u8 *k = key;

	len = length;
	a = b = JHASH_GOLDEN_RATIO;
	c = initval;

	while (len >= 12) {
		a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
		b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
		c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));

		__jhash_mix(a,b,c);

		k += 12;
		len -= 12;
	}

	c += length;
	switch (len) {
	case 11: c += ((u32)k[10]<<24);
	case 10: c += ((u32)k[9]<<16);
	case 9 : c += ((u32)k[8]<<8);
	case 8 : b += ((u32)k[7]<<24);
	case 7 : b += ((u32)k[6]<<16);
	case 6 : b += ((u32)k[5]<<8);
	case 5 : b += k[4];
	case 4 : a += ((u32)k[3]<<24);
	case 3 : a += ((u32)k[2]<<16);
	case 2 : a += ((u32)k[1]<<8);
	case 1 : a += k[0];
	};

	__jhash_mix(a,b,c);

	return c;
}

/* A special optimized version that handles 1 or more u32s.
 * The length parameter here is the number of u32s in the key.
 */
static __inline__ u32 hash2(u32 *k, u32 length, u32 initval)
{
	u32 a, b, c, len;

	a = b = JHASH_GOLDEN_RATIO;
	c = initval;
	len = length;

	while (len >= 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		__jhash_mix(a, b, c);
		k += 3; len -= 3;
	}

	c += length * 4;

	switch (len) {
	case 2 : b += k[1];
	case 1 : a += k[0];
	};

	__jhash_mix(a,b,c);

	return c;
}


/* Special ultra-optimized versions that know they are hashing exactly
 * 3, 2 or 1 word(s).
 *
 * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
 *       done at the end is not done here.
 */
static __inline__ u32 jenkins_hash_3words(u32 a, u32 b, u32 c,
					  u32 initval)
{
	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += initval;

	__jhash_mix(a, b, c);

	return c;
}

static __inline__ u32 jenkins_hash_2words(u32 a, u32 b, u32 initval)
{
	u32 c = 0;

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += initval;

	__jhash_mix(a, b, c);

	return c;
}

static __inline__ u32 jenkins_hash_1word(u32 a, u32 initval)
{
	u32 b = 0;
	u32 c = 0;

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += initval;

	__jhash_mix(a, b, c);

	return c;
}

#endif /* _LINUX_JHASH_H */
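
For reference (not part of the patch): a minimal userspace sketch of how the helpers above are meant to be called. The idea is to fold the connection identifiers into three 32-bit words, mix in a secret random initval, then reduce to a bucket index; that is the pattern the hunks below apply to conntrack, the routing cache and the TCP SYN queues. Here <stdint.h> types stand in for the kernel's u32, rand() stands in for get_random_bytes(), and HTABLE_SIZE is a made-up table size; the mix macro and jenkins_hash_3words() are copied from the header.

/* Userspace sketch only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

typedef uint32_t u32;

#define JHASH_GOLDEN_RATIO	0x9e3779b9

/* Copied from the header above. */
#define __jhash_mix(a, b, c) \
{ \
  a -= b; a -= c; a ^= (c>>13); \
  b -= c; b -= a; b ^= (a<<8); \
  c -= a; c -= b; c ^= (b>>13); \
  a -= b; a -= c; a ^= (c>>12); \
  b -= c; b -= a; b ^= (a<<16); \
  c -= a; c -= b; c ^= (b>>5); \
  a -= b; a -= c; a ^= (c>>3); \
  b -= c; b -= a; b ^= (a<<10); \
  c -= a; c -= b; c ^= (b>>15); \
}

static u32 jenkins_hash_3words(u32 a, u32 b, u32 c, u32 initval)
{
	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += initval;
	__jhash_mix(a, b, c);
	return c;
}

#define HTABLE_SIZE 1024		/* made-up table size */

int main(void)
{
	u32 saddr = 0xc0a80001;			/* 192.168.0.1 */
	u32 daddr = 0x0a000001;			/* 10.0.0.1 */
	u32 ports = (80u << 16) | 12345;	/* both ports packed into one word */
	u32 secret, bucket;

	srand((unsigned) time(NULL));
	secret = (u32) rand();			/* stands in for get_random_bytes() */

	/* Without the secret, anyone who knows the hash can construct tuples
	 * that all land in one bucket; with it, the bucket layout is not
	 * predictable from outside. */
	bucket = jenkins_hash_3words(saddr, daddr, ports, secret) % HTABLE_SIZE;
	printf("bucket = %u\n", (unsigned) bucket);
	return 0;
}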
(-)a/include/linux/sysctl.h (-1 / +2 lines)
Lines 315-321
 	NET_IPV4_ROUTE_GC_ELASTICITY=14,
 	NET_IPV4_ROUTE_MTU_EXPIRES=15,
 	NET_IPV4_ROUTE_MIN_PMTU=16,
-	NET_IPV4_ROUTE_MIN_ADVMSS=17
+	NET_IPV4_ROUTE_MIN_ADVMSS=17,
+	NET_IPV4_ROUTE_SECRET_INTERVAL=18,
 };
 
 enum
(-)a/include/net/tcp.h (+1 lines)
Lines 1604-1609
 	int			qlen;
 	int			qlen_young;
 	int			clock_hand;
+	u32			hash_rnd;
 	struct open_request	*syn_table[TCP_SYNQ_HSIZE];
 };
 
(-)a/net/ipv4/netfilter/ip_conntrack_core.c (-12 / +19 lines)
Lines 28-33
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
 /* For ERR_PTR().  Yeah, I know... --RR */
 #include <linux/fs.h>
 
Lines 104-123
 	nf_conntrack_put(&ct->infos[0]);
 }
 
-static inline u_int32_t
+static int ip_conntrack_hash_rnd_initted;
+static unsigned int ip_conntrack_hash_rnd;
+
+static u_int32_t
 hash_conntrack(const struct ip_conntrack_tuple *tuple)
 {
 #if 0
 	dump_tuple(tuple);
 #endif
-	/* ntohl because more differences in low bits. */
-	/* To ensure that halves of the same connection don't hash
-	   clash, we add the source per-proto again. */
-	return (ntohl(tuple->src.ip + tuple->dst.ip
-		     + tuple->src.u.all + tuple->dst.u.all
-		     + tuple->dst.protonum)
-		+ ntohs(tuple->src.u.all))
-		% ip_conntrack_htable_size;
+	return (jenkins_hash_3words(tuple->src.ip,
+				    (tuple->dst.ip ^ tuple->dst.protonum),
+				    (tuple->src.u.all |
+				     (tuple->dst.u.all << 16)),
+				    ip_conntrack_hash_rnd)
+		% ip_conntrack_htable_size);
 }
 
 inline int
Lines 633-643
 {
 	struct ip_conntrack *conntrack;
 	struct ip_conntrack_tuple repl_tuple;
-	size_t hash, repl_hash;
+	size_t hash;
 	struct ip_conntrack_expect *expected;
 	int i;
 	static unsigned int drop_next = 0;
 
+	if (!ip_conntrack_hash_rnd_initted) {
+		get_random_bytes(&ip_conntrack_hash_rnd, 4);
+		ip_conntrack_hash_rnd_initted = 1;
+	}
+
 	hash = hash_conntrack(tuple);
 
 	if (ip_conntrack_max &&
Lines 661-667
 		DEBUGP("Can't invert tuple.\n");
 		return NULL;
 	}
-	repl_hash = hash_conntrack(&repl_tuple);
 
 	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
 	if (!conntrack) {
Lines 1428-1434
 	ip_conntrack_max = 8 * ip_conntrack_htable_size;
 
 	printk("ip_conntrack version %s (%u buckets, %d max)"
-	       " - %d bytes per conntrack\n", IP_CONNTRACK_VERSION,
+	       " - %Zd bytes per conntrack\n", IP_CONNTRACK_VERSION,
 	       ip_conntrack_htable_size, ip_conntrack_max,
 	       sizeof(struct ip_conntrack));
 
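
The hunk above seeds ip_conntrack_hash_rnd lazily, on the first conntrack allocation rather than at module init. A rough userspace analogue of that seed-once pattern (not part of the patch; /dev/urandom stands in for get_random_bytes(), and the names ending in _example are hypothetical):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int hash_rnd_initted_example;
static uint32_t hash_rnd_example;

/* Mirror of the "if (!ip_conntrack_hash_rnd_initted) { ... }" block above:
 * the secret is drawn exactly once, the first time it is actually needed. */
static void init_hash_rnd_example(void)
{
	int fd;

	if (hash_rnd_initted_example)
		return;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (read(fd, &hash_rnd_example, sizeof(hash_rnd_example)) !=
		    (ssize_t) sizeof(hash_rnd_example))
			hash_rnd_example = 0;	/* sketch only, no real fallback */
		close(fd);
	}
	hash_rnd_initted_example = 1;
}

int main(void)
{
	init_hash_rnd_example();
	printf("hash_rnd = 0x%08x\n", (unsigned) hash_rnd_example);
	return 0;
}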
(-)a/net/ipv4/route.c (-16 / +52 lines)
Lines 85-90
 #include <linux/mroute.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/random.h>
+#include <linux/jhash.h>
 #include <net/protocol.h>
 #include <net/ip.h>
 #include <net/route.h>
Lines 117-129
 int ip_rt_mtu_expires		= 10 * 60 * HZ;
 int ip_rt_min_pmtu		= 512 + 20 + 20;
 int ip_rt_min_advmss		= 256;
-
+int ip_rt_secret_interval	= 10 * 60 * HZ;
 static unsigned long rt_deadline;
 
 #define RTprint(a...)	printk(KERN_DEBUG a)
 
 static struct timer_list rt_flush_timer;
 static struct timer_list rt_periodic_timer;
+static struct timer_list rt_secret_timer;
 
 /*
  *	Interface to generic destination cache.
Lines 194-212
 static struct rt_hash_bucket 	*rt_hash_table;
 static unsigned			rt_hash_mask;
 static int			rt_hash_log;
+static unsigned int		rt_hash_rnd;
 
 struct rt_cache_stat rt_cache_stat[NR_CPUS];
 
 static int rt_intern_hash(unsigned hash, struct rtable *rth,
 				struct rtable **res);
 
-static __inline__ unsigned rt_hash_code(u32 daddr, u32 saddr, u8 tos)
+static unsigned int rt_hash_code(u32 daddr, u32 saddr, u8 tos)
 {
-	unsigned hash = ((daddr & 0xF0F0F0F0) >> 4) |
-			((daddr & 0x0F0F0F0F) << 4);
-	hash ^= saddr ^ tos;
-	hash ^= (hash >> 16);
-	return (hash ^ (hash >> 8)) & rt_hash_mask;
+	return (jenkins_hash_3words(daddr, saddr, (u32) tos, rt_hash_rnd)
+		& rt_hash_mask);
 }
 
 static int rt_cache_get_info(char *buffer, char **start, off_t offset,
Lines 479-484
 	spin_unlock_bh(&rt_flush_lock);
 }
 
+static void rt_secret_rebuild(unsigned long dummy)
+{
+	unsigned long now = jiffies;
+
+	get_random_bytes(&rt_hash_rnd, 4);
+	rt_cache_flush(0);
+	mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
+}
+
 /*
    Short description of GC goals.
 
Lines 2414-2419
 		mode:		0644,
 		proc_handler:	&proc_dointvec,
 	},
+	{
+		ctl_name:	NET_IPV4_ROUTE_SECRET_INTERVAL,
+		procname:	"secret_interval",
+		data:		&ip_rt_secret_interval,
+		maxlen:		sizeof(int),
+		mode:		0644,
+		proc_handler:	&proc_dointvec_jiffies,
+		strategy:	&sysctl_jiffies,
+	},
 	 { 0 }
 };
 #endif
Lines 2444-2458
 		*eof = 1;
 	}
 
-	/* Copy first cpu. */
-	*start = buffer;
-	memcpy(buffer, IP_RT_ACCT_CPU(0), length);
-
-	/* Add the other cpus in, one int at a time */
-	for (i = 1; i < smp_num_cpus; i++) {
-		unsigned int j;
-		for (j = 0; j < length/4; j++)
-			((u32*)buffer)[j] += ((u32*)IP_RT_ACCT_CPU(i))[j];
+	offset /= sizeof(u32);
+
+	if (length > 0) {
+		u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
+		u32 *dst = (u32 *) buffer;
+
+		/* Copy first cpu. */
+		*start = buffer;
+		memcpy(dst, src, length);
+
+		/* Add the other cpus in, one int at a time */
+		for (i = 1; i < smp_num_cpus; i++) {
+			unsigned int j;
+
+			src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
+
+			for (j = 0; j < length/4; j++)
+				dst[j] += src[j];
+		}
 	}
 	return length;
 }
Lines 2462-2467
 {
 	int i, order, goal;
 
+	rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
+			     (jiffies ^ (jiffies >> 7)));
+
 #ifdef CONFIG_NET_CLS_ROUTE
 	for (order = 0;
 	     (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
Lines 2518-2523
 
 	rt_flush_timer.function = rt_run_flush;
 	rt_periodic_timer.function = rt_check_expire;
+	rt_secret_timer.function = rt_secret_rebuild;
 
 	/* All the timers, started at system startup tend
 	   to synchronize. Perturb it a bit.
Lines 2525-2530
 	rt_periodic_timer.expires = jiffies + net_random() % ip_rt_gc_interval +
 					ip_rt_gc_interval;
 	add_timer(&rt_periodic_timer);
+
+	rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
+		ip_rt_secret_interval;
+	add_timer(&rt_secret_timer);
 
 	proc_net_create ("rt_cache", 0, rt_cache_get_info);
 	proc_net_create ("rt_cache_stat", 0, rt_cache_stat_get_info);
(-)a/net/ipv4/tcp.c (+2 lines)
Lines 252-257
 #include <linux/init.h>
 #include <linux/smp_lock.h>
 #include <linux/fs.h>
+#include <linux/random.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
Lines 542-547
 	for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
 		if ((1<<lopt->max_qlen_log) >= sysctl_max_syn_backlog)
 			break;
+	get_random_bytes(&lopt->hash_rnd, 4);
 
 	write_lock_bh(&tp->syn_wait_lock);
 	tp->listen_opt = lopt;
(-)a/net/ipv4/tcp_ipv4.c (-7 / +6 lines)
Lines 56-61
 #include <linux/fcntl.h>
 #include <linux/random.h>
 #include <linux/cache.h>
+#include <linux/jhash.h>
 #include <linux/init.h>
 
 #include <net/icmp.h>
Lines 868-879
 	return ((struct rtable*)skb->dst)->rt_iif;
 }
 
-static __inline__ unsigned tcp_v4_synq_hash(u32 raddr, u16 rport)
+static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
 {
-	unsigned h = raddr ^ rport;
-	h ^= h>>16;
-	h ^= h>>8;
-	return h&(TCP_SYNQ_HSIZE-1);
+	return (jenkins_hash_2words(raddr, (u32) rport, rnd)
+		& (TCP_SYNQ_HSIZE - 1));
 }
 
 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
Lines 884-890
 	struct tcp_listen_opt *lopt = tp->listen_opt;
 	struct open_request *req, **prev;
 
-	for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport)];
+	for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
 	     (req = *prev) != NULL;
 	     prev = &req->dl_next) {
 		if (req->rmt_port == rport &&
Lines 904-910
 {
 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
 	struct tcp_listen_opt *lopt = tp->listen_opt;
-	unsigned h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port);
+	u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
 
 	req->expires = jiffies + TCP_TIMEOUT_INIT;
 	req->retrans = 0;
(-)a/net/ipv6/tcp_ipv6.c (-8 / +8 lines)
Lines 36-41
 #include <linux/in6.h>
 #include <linux/netdevice.h>
 #include <linux/init.h>
+#include <linux/jhash.h>
 #include <linux/ipsec.h>
 
 #include <linux/ipv6.h>
Lines 368-379
  * Open request hash tables.
  */
 
-static __inline__ unsigned tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport)
+static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
 {
-	unsigned h = raddr->s6_addr32[3] ^ rport;
-	h ^= h>>16;
-	h ^= h>>8;
-	return h&(TCP_SYNQ_HSIZE-1);
+	return (jenkins_hash_3words(raddr->s6_addr32[0] ^ raddr->s6_addr32[1],
+				    raddr->s6_addr32[2] ^ raddr->s6_addr32[3],
+				    (u32) rport, rnd)
+		& (TCP_SYNQ_HSIZE - 1));
 }
 
 static struct open_request *tcp_v6_search_req(struct tcp_opt *tp,
Lines 386-392
 	struct tcp_listen_opt *lopt = tp->listen_opt;
 	struct open_request *req, **prev;
 
-	for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport)];
+	for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
 	     (req = *prev) != NULL;
 	     prev = &req->dl_next) {
 		if (req->rmt_port == rport &&
Lines 1135-1141
 {
 	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
 	struct tcp_listen_opt *lopt = tp->listen_opt;
-	unsigned h = tcp_v6_synq_hash(&req->af.v6_req.rmt_addr, req->rmt_port);
+	u32 h = tcp_v6_synq_hash(&req->af.v6_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
 
 	req->sk = NULL;
 	req->expires = jiffies + TCP_TIMEOUT_INIT;
 