Bug 184852: patch (raw unified diff)
(-)a/arch/arm/kernel/traps.c (-2 / +3 lines)
Lines 273-278 asmlinkage void do_undefinstr(struct pt_regs *regs)
 	struct undef_hook *hook;
 	siginfo_t info;
 	void __user *pc;
+	unsigned long flags;
 
 	/*
 	 * According to the ARM ARM, PC is 2 or 4 bytes ahead,
Lines 291-297 asmlinkage void do_undefinstr(struct pt_regs *regs)
 		get_user(instr, (u32 __user *)pc);
 	}
 
-	spin_lock_irq(&undef_lock);
+	spin_lock_irqsave(&undef_lock, flags);
 	list_for_each_entry(hook, &undef_hook, node) {
 		if ((instr & hook->instr_mask) == hook->instr_val &&
 		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) {
Lines 301-307 asmlinkage void do_undefinstr(struct pt_regs *regs)
 			}
 		}
 	}
-	spin_unlock_irq(&undef_lock);
+	spin_unlock_irqrestore(&undef_lock, flags);
 
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_UNDEFINED) {
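The two hunks above swap the plain _irq spinlock calls for the irqsave variants because spin_unlock_irq() unconditionally re-enables interrupts on exit; if the undefined-instruction trap is taken with interrupts already disabled, that would silently turn them back on. A minimal kernel-style sketch of the pattern (demo lock and function names, not from the patch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Safe in any context: the irqsave/irqrestore pair records the
 * caller's interrupt state and puts it back, instead of forcing
 * interrupts on at unlock time the way spin_unlock_irq() does. */
static void demo_touch_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... walk or modify data shared with interrupt context ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}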
(-)a/arch/arm/mach-iop13xx/pci.c (-4 / +4 lines)
Lines 1023-1029 int iop13xx_pci_setup(int nr, struct pci_sys_data *sys)
 				  << IOP13XX_ATUX_PCIXSR_FUNC_NUM;
 		__raw_writel(pcixsr, IOP13XX_ATUX_PCIXSR);
 
-		res[0].start = IOP13XX_PCIX_LOWER_IO_PA;
+		res[0].start = IOP13XX_PCIX_LOWER_IO_PA + IOP13XX_PCIX_IO_BUS_OFFSET;
 		res[0].end   = IOP13XX_PCIX_UPPER_IO_PA;
 		res[0].name  = "IQ81340 ATUX PCI I/O Space";
 		res[0].flags = IORESOURCE_IO;
Lines 1033-1039 int iop13xx_pci_setup(int nr, struct pci_sys_data *sys)
 		res[1].name  = "IQ81340 ATUX PCI Memory Space";
 		res[1].flags = IORESOURCE_MEM;
 		sys->mem_offset = IOP13XX_PCIX_MEM_OFFSET;
-		sys->io_offset = IOP13XX_PCIX_IO_OFFSET;
+		sys->io_offset = IOP13XX_PCIX_LOWER_IO_PA;
 		break;
 	case IOP13XX_INIT_ATU_ATUE:
 		/* Note: the function number field in the PCSR is ro */
Lines 1044-1050 int iop13xx_pci_setup(int nr, struct pci_sys_data *sys)
 
 		__raw_writel(pcsr, IOP13XX_ATUE_PCSR);
 
-		res[0].start = IOP13XX_PCIE_LOWER_IO_PA;
+		res[0].start = IOP13XX_PCIE_LOWER_IO_PA + IOP13XX_PCIE_IO_BUS_OFFSET;
 		res[0].end   = IOP13XX_PCIE_UPPER_IO_PA;
 		res[0].name  = "IQ81340 ATUE PCI I/O Space";
 		res[0].flags = IORESOURCE_IO;
Lines 1054-1060 int iop13xx_pci_setup(int nr, struct pci_sys_data *sys)
 		res[1].name  = "IQ81340 ATUE PCI Memory Space";
 		res[1].flags = IORESOURCE_MEM;
 		sys->mem_offset = IOP13XX_PCIE_MEM_OFFSET;
-		sys->io_offset = IOP13XX_PCIE_IO_OFFSET;
+		sys->io_offset = IOP13XX_PCIE_LOWER_IO_PA;
 		sys->map_irq = iop13xx_pcie_map_irq;
 		break;
 	default:
(-)a/arch/arm/plat-iop/time.c (-4 / +4 lines)
Lines 32-53 static unsigned long next_jiffy_time;
 
 unsigned long iop_gettimeoffset(void)
 {
-	unsigned long offset, temp1, temp2;
+	unsigned long offset, temp;
 
 	/* enable cp6, if necessary, to avoid taking the overhead of an
 	 * undefined instruction trap
 	 */
 	asm volatile (
 	"mrc	p15, 0, %0, c15, c1, 0\n\t"
-	"ands	%1, %0, #(1 << 6)\n\t"
+	"tst	%0, #(1 << 6)\n\t"
 	"orreq	%0, %0, #(1 << 6)\n\t"
 	"mcreq	p15, 0, %0, c15, c1, 0\n\t"
-#ifdef CONFIG_XSCALE
+#ifdef CONFIG_CPU_XSCALE
 	"mrceq	p15, 0, %0, c15, c1, 0\n\t"
 	"moveq	%0, %0\n\t"
 	"subeq	pc, pc, #4\n\t"
 #endif
-	: "=r"(temp1), "=r"(temp2) : : "cc");
+	: "=r"(temp) : : "cc");
 
 	offset = next_jiffy_time - read_tcr1();
 
(-)a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c (-8 / +28 lines)
Lines 341-355 static int powernow_acpi_init(void)
 	pc.val = (unsigned long) acpi_processor_perf->states[0].control;
 	for (i = 0; i < number_scales; i++) {
 		u8 fid, vid;
-		unsigned int speed;
+		struct acpi_processor_px *state =
+			&acpi_processor_perf->states[i];
+		unsigned int speed, speed_mhz;
 
-		pc.val = (unsigned long) acpi_processor_perf->states[i].control;
+		pc.val = (unsigned long) state->control;
 		dprintk ("acpi:  P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
 			 i,
-			 (u32) acpi_processor_perf->states[i].core_frequency,
-			 (u32) acpi_processor_perf->states[i].power,
-			 (u32) acpi_processor_perf->states[i].transition_latency,
-			 (u32) acpi_processor_perf->states[i].control,
+			 (u32) state->core_frequency,
+			 (u32) state->power,
+			 (u32) state->transition_latency,
+			 (u32) state->control,
 			 pc.bits.sgtc);
 
 		vid = pc.bits.vid;
Lines 360-365 static int powernow_acpi_init(void)
 		powernow_table[i].index |= (vid << 8); /* upper 8 bits */
 
 		speed = powernow_table[i].frequency;
+		speed_mhz = speed / 1000;
+
+		/* processor_perflib will multiply the MHz value by 1000 to
+		 * get a KHz value (e.g. 1266000). However, powernow-k7 works
+		 * with true KHz values (e.g. 1266768). To ensure that all
+		 * powernow frequencies are available, we must ensure that
+		 * ACPI doesn't restrict them, so we round up the MHz value
+		 * to ensure that perflib's computed KHz value is greater than
+		 * or equal to powernow's KHz value.
+		 */
+		if (speed % 1000 > 0)
+			speed_mhz++;
 
 		if ((fid_codes[fid] % 10)==5) {
 			if (have_a0 == 1)
Lines 368-377 static int powernow_acpi_init(void)
 
 		dprintk ("   FID: 0x%x (%d.%dx [%dMHz])  "
 			 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
-			 fid_codes[fid] % 10, speed/1000, vid,
+			 fid_codes[fid] % 10, speed_mhz, vid,
 			 mobile_vid_table[vid]/1000,
 			 mobile_vid_table[vid]%1000);
 
+		if (state->core_frequency != speed_mhz) {
+			state->core_frequency = speed_mhz;
+			dprintk("   Corrected ACPI frequency to %d\n",
+				speed_mhz);
+		}
+
 		if (latency < pc.bits.sgtc)
 			latency = pc.bits.sgtc;
 
Lines 602-608 static int __init powernow_cpu_init (struct cpufreq_policy *policy)
 			result = powernow_acpi_init();
 			if (result) {
 				printk (KERN_INFO PFX "ACPI and legacy methods failed\n");
-				printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.shtml\n");
+				printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.html\n");
 			}
 		} else {
 			/* SGTC use the bus clock as timer */
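The speed_mhz rounding added above is plain integer arithmetic and can be checked in isolation: truncating a true kHz frequency to MHz would let perflib's later MHz * 1000 computation fall below the real value and filter out a valid P-state. A standalone sketch of the same round-up (demo function name, not from the driver):

#include <stdio.h>

/* Round a kHz frequency up to whole MHz, mirroring the divide plus
 * conditional increment in the hunk above. */
static unsigned int khz_to_mhz_round_up(unsigned int khz)
{
	unsigned int mhz = khz / 1000;

	if (khz % 1000 > 0)
		mhz++;
	return mhz;
}

int main(void)
{
	/* 1266768 kHz must report as 1267 MHz so that perflib's
	 * 1267 * 1000 = 1267000 kHz is >= the true 1266768 kHz. */
	printf("%u\n", khz_to_mhz_round_up(1266768));	/* 1267 */
	printf("%u\n", khz_to_mhz_round_up(1266000));	/* 1266 */
	return 0;
}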
(-)a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c (-1 / +1 lines)
Lines 521-527 static int check_supported_cpu(unsigned int cpu)
 
 	if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
 		if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
-		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) {
+		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
 			printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
 			goto out;
 		}
(-)a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h (-2 / +2 lines)
Lines 46-53 struct powernow_k8_data {
 #define CPUID_XFAM			0x0ff00000	/* extended family */
 #define CPUID_XFAM_K8			0
 #define CPUID_XMOD			0x000f0000	/* extended model */
-#define CPUID_XMOD_REV_G		0x00060000
-#define CPUID_XFAM_10H 			0x00100000	/* family 0x10 */
+#define CPUID_XMOD_REV_MASK		0x00080000
+#define CPUID_XFAM_10H			0x00100000	/* family 0x10 */
 #define CPUID_USE_XFAM_XMOD		0x00000f00
 #define CPUID_GET_MAX_CAPABILITIES	0x80000000
 #define CPUID_FREQ_VOLT_CAPABILITIES	0x80000007
(-)a/arch/sparc64/kernel/of_device.c (+7 lines)
Lines 508-513 static int __init build_one_resource(struct device_node *parent,
 			return 0;
 	}
 
+	/* When we miss an I/O space match on PCI, just pass it up
+	 * to the next PCI bridge and/or controller.
+	 */
+	if (!strcmp(bus->name, "pci") &&
+	    (addr[0] & 0x03000000) == 0x01000000)
+		return 0;
+
 	return 1;
 }
 
(-)a/arch/sparc64/kernel/prom.c (-4 / +15 lines)
Lines 1555-1564 static struct device_node * __init create_node(phandle node, struct device_node
 
 static struct device_node * __init build_tree(struct device_node *parent, phandle node, struct device_node ***nextp)
 {
+	struct device_node *ret = NULL, *prev_sibling = NULL;
 	struct device_node *dp;
 
-	dp = create_node(node, parent);
-	if (dp) {
+	while (1) {
+		dp = create_node(node, parent);
+		if (!dp)
+			break;
+
+		if (prev_sibling)
+			prev_sibling->sibling = dp;
+
+		if (!ret)
+			ret = dp;
+		prev_sibling = dp;
+
 		*(*nextp) = dp;
 		*nextp = &dp->allnext;
 
Lines 1567-1576 static struct device_node * __init build_tree(struct device_node *parent, phandl
 
 		dp->child = build_tree(dp, prom_getchild(node), nextp);
 
-		dp->sibling = build_tree(parent, prom_getsibling(node), nextp);
+		node = prom_getsibling(node);
 	}
 
-	return dp;
+	return ret;
 }
 
 void __init prom_build_devicetree(void)
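The build_tree() rewrite above converts tail recursion over siblings into a loop, so a firmware node with a very long sibling chain no longer grows the kernel stack by one frame per node; only the child edge still recurses. A standalone sketch of that traversal shape (hypothetical node type, not the kernel's device_node):

#include <stdio.h>

struct tnode {
	int id;
	struct tnode *child;
	struct tnode *sibling;
};

/* Siblings are consumed iteratively; recursion depth now tracks only
 * the tree's depth, not its breadth. */
static void walk(struct tnode *n)
{
	while (n) {
		printf("visit %d\n", n->id);
		walk(n->child);		/* recurse into children only */
		n = n->sibling;		/* iterate across siblings */
	}
}

int main(void)
{
	struct tnode leaf = { .id = 3 };
	struct tnode second = { .id = 2 };
	struct tnode first = { .id = 1, .child = &leaf, .sibling = &second };

	walk(&first);			/* prints 1, 3, 2 */
	return 0;
}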
(-)a/arch/sparc64/kernel/smp.c (+3 lines)
Lines 566-571 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
 	unsigned long flags, status;
 	int cnt, retries, this_cpu, prev_sent, i;
 
+	if (cpus_empty(mask))
+		return;
+
 	/* We have to do this whole thing with interrupts fully disabled.
 	 * Otherwise if we send an xcall from interrupt context it will
 	 * corrupt both our mondo block and cpu list state.
(-)a/arch/x86_64/kernel/vsyscall.c (-1 / +1 lines)
Lines 132-138 static __always_inline void do_vgettimeofday(struct timeval * tv)
 
 	/* convert to usecs and add to timespec: */
 	tv->tv_usec += nsec_delta / NSEC_PER_USEC;
-	while (tv->tv_usec > USEC_PER_SEC) {
+	while (tv->tv_usec >= USEC_PER_SEC) {
 		tv->tv_sec += 1;
 		tv->tv_usec -= USEC_PER_SEC;
 	}
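The one-character change above matters because a tv_usec of exactly USEC_PER_SEC slipped through the old `>` test, leaving a denormalized value such as 10.1000000 s. A standalone sketch of the corrected normalization (demo struct, not the kernel's timeval):

#include <stdio.h>

#define USEC_PER_SEC 1000000L

struct tv_demo { long tv_sec; long tv_usec; };

/* Normalize so that 0 <= tv_usec < USEC_PER_SEC; with '>' instead of
 * '>=', tv_usec == USEC_PER_SEC would survive the loop untouched. */
static void normalize(struct tv_demo *tv)
{
	while (tv->tv_usec >= USEC_PER_SEC) {
		tv->tv_sec += 1;
		tv->tv_usec -= USEC_PER_SEC;
	}
}

int main(void)
{
	struct tv_demo tv = { .tv_sec = 10, .tv_usec = USEC_PER_SEC };

	normalize(&tv);
	printf("%ld.%06ld\n", tv.tv_sec, tv.tv_usec);	/* 11.000000 */
	return 0;
}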
(-)a/crypto/api.c (-1 / +3 lines)
Lines 48-55 EXPORT_SYMBOL_GPL(crypto_mod_get);
 
 void crypto_mod_put(struct crypto_alg *alg)
 {
+	struct module *module = alg->cra_module;
+
 	crypto_alg_put(alg);
-	module_put(alg->cra_module);
+	module_put(module);
 }
 EXPORT_SYMBOL_GPL(crypto_mod_put);
 
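The hunk above fixes an ordering bug: crypto_alg_put() may drop the last reference and free *alg, after which the old code's read of alg->cra_module was a use-after-free. Anything still needed from a refcounted object must be saved before the final put. A standalone sketch of the same save-before-release pattern (hypothetical types, not the crypto API):

#include <stdio.h>
#include <stdlib.h>

struct owner { int id; };

struct object {
	int refcnt;
	struct owner *owner;
};

/* Drops a reference; frees the object when the count hits zero. */
static void object_put(struct object *obj)
{
	if (--obj->refcnt == 0)
		free(obj);
}

static void owner_put(struct owner *o)
{
	printf("releasing owner %d\n", o->id);
}

int main(void)
{
	static struct owner me = { .id = 1 };
	struct object *obj = malloc(sizeof(*obj));

	obj->refcnt = 1;
	obj->owner = &me;

	/* Save the owner pointer first: object_put() may free obj,
	 * making a later obj->owner read a use-after-free. */
	struct owner *o = obj->owner;

	object_put(obj);
	owner_put(o);
	return 0;
}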
(-)a/drivers/acpi/tables/tbfadt.c (+14 lines)
Lines 347-352 static void acpi_tb_convert_fadt(void)
 		acpi_gbl_xpm1b_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id;
 
 	}
+	/*
+	 * _CST object and C States change notification start with
+	 * ACPI 2.0 (FADT r3).  Although the field should be Reserved
+	 * and 0 before then, some pre-r3 FADT set this field and
+	 * it results in SMM-related boot failures.  For them, clear it.
+	 */
+	if ((acpi_gbl_FADT.header.revision < 3) &&
+		(acpi_gbl_FADT.cst_control != 0)) {
+			ACPI_WARNING((AE_INFO,
+				"Ignoring BIOS FADT r%u C-state control",
+				acpi_gbl_FADT.header.revision));
+			acpi_gbl_FADT.cst_control = 0;
+	}
+
 }
 
 /******************************************************************************
(-)a/drivers/ata/libata-sff.c (-12 / +23 lines)
Lines 557-568 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
 	int i, p = 0;
 	void __iomem * const *iomap;
 
+	/* Discard disabled ports. Some controllers show their
+	   unused channels this way */
+	if (ata_resources_present(pdev, 0) == 0)
+		ports &= ~ATA_PORT_PRIMARY;
+	if (ata_resources_present(pdev, 1) == 0)
+		ports &= ~ATA_PORT_SECONDARY;
+
 	/* iomap BARs */
-	for (i = 0; i < 4; i++) {
-		if (pcim_iomap(pdev, i, 0) == NULL) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "failed to iomap PCI BAR %d\n", i);
-			return NULL;
+	if (ports & ATA_PORT_PRIMARY) {
+		for (i = 0; i <= 1; i++) {
+			if (pcim_iomap(pdev, i, 0) == NULL) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "failed to iomap PCI BAR %d\n", i);
+				return NULL;
+			}
+		}
+	}
+	if (ports & ATA_PORT_SECONDARY) {
+		for (i = 2; i <= 3; i++) {
+			if (pcim_iomap(pdev, i, 0) == NULL) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "failed to iomap PCI BAR %d\n", i);
+				return NULL;
+			}
 		}
 	}
 
Lines 577-589 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
 	probe_ent->irq = pdev->irq;
 	probe_ent->irq_flags = IRQF_SHARED;
 
-	/* Discard disabled ports. Some controllers show their
-	   unused channels this way */
-	if (ata_resources_present(pdev, 0) == 0)
-		ports &= ~ATA_PORT_PRIMARY;
-	if (ata_resources_present(pdev, 1) == 0)
-		ports &= ~ATA_PORT_SECONDARY;
-
 	if (ports & ATA_PORT_PRIMARY) {
 		probe_ent->port[p].cmd_addr = iomap[0];
 		probe_ent->port[p].altstatus_addr =
(-)a/drivers/ata/sata_via.c (+8 lines)
Lines 97-102 static struct pci_driver svia_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= svia_pci_tbl,
 	.probe			= svia_init_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
 	.remove			= ata_pci_remove_one,
 };
 
Lines 116-121 static struct scsi_host_template svia_sht = {
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+#ifdef CONFIG_PM
+	.suspend		= ata_scsi_device_suspend,
+	.resume			= ata_scsi_device_resume,
+#endif
 };
 
 static const struct ata_port_operations vt6420_sata_ops = {
(-)a/drivers/base/core.c (-3 / +4 lines)
Lines 93-98 static void device_release(struct kobject * kobj)
 {
 	struct device * dev = to_dev(kobj);
 
+	kfree(dev->devt_attr);
+	dev->devt_attr = NULL;
+
 	if (dev->release)
 		dev->release(dev);
 	else if (dev->type && dev->type->release)
Lines 765-774 void device_del(struct device * dev)
 
 	if (parent)
 		klist_del(&dev->knode_parent);
-	if (dev->devt_attr) {
+	if (dev->devt_attr)
 		device_remove_file(dev, dev->devt_attr);
-		kfree(dev->devt_attr);
-	}
 	if (dev->class) {
 		sysfs_remove_link(&dev->kobj, "subsystem");
 		/* If this is not a "fake" compatible device, remove the
(-)a/drivers/char/ipmi/ipmi_si_intf.c (-2 / +2 lines)
Lines 1859-1868 static __devinit int try_init_acpi(struct SPMITable *spmi)
 
 	if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 		info->io_setup = mem_setup;
-		info->io.addr_type = IPMI_IO_ADDR_SPACE;
+		info->io.addr_type = IPMI_MEM_ADDR_SPACE;
 	} else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
 		info->io_setup = port_setup;
-		info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+		info->io.addr_type = IPMI_IO_ADDR_SPACE;
 	} else {
 		kfree(info);
 		printk("ipmi_si: Unknown ACPI I/O Address type\n");
(-)a/drivers/md/raid1.c (-14 / +19 lines)
Lines 271-291 static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
 	 */
 	update_head_pos(mirror, r1_bio);
 
-	if (uptodate || (conf->raid_disks - conf->mddev->degraded) <= 1) {
-		/*
-		 * Set R1BIO_Uptodate in our master bio, so that
-		 * we will return a good error code for to the higher
-		 * levels even if IO on some other mirrored buffer fails.
-		 *
-		 * The 'master' represents the composite IO operation to
-		 * user-side. So if something waits for IO, then it will
-		 * wait for the 'master' bio.
-		 */
-		if (uptodate)
-			set_bit(R1BIO_Uptodate, &r1_bio->state);
+	if (uptodate)
+		set_bit(R1BIO_Uptodate, &r1_bio->state);
+	else {
+		/* If all other devices have failed, we want to return
+		 * the error upwards rather than fail the last device.
+		 * Here we redefine "uptodate" to mean "Don't want to retry"
+		 */
+		unsigned long flags;
+		spin_lock_irqsave(&conf->device_lock, flags);
+		if (r1_bio->mddev->degraded == conf->raid_disks ||
+		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
+		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+			uptodate = 1;
+		spin_unlock_irqrestore(&conf->device_lock, flags);
+	}
 
+	if (uptodate)
 		raid_end_bio_io(r1_bio);
-	} else {
+	else {
 		/*
 		 * oops, read error:
 		 */
Lines 992-1004 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 		unsigned long flags;
 		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded++;
+		set_bit(Faulty, &rdev->flags);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 		/*
 		 * if recovery is running, make sure it aborts.
 		 */
 		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
-	}
-	set_bit(Faulty, &rdev->flags);
+	} else
+		set_bit(Faulty, &rdev->flags);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
 	printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
 		"	Operation continuing on %d devices\n",
(-)a/drivers/message/fusion/mptspi.c (-3 / +5 lines)
Lines 726-738 static int mptspi_slave_configure(struct scsi_device *sdev)
 	struct _MPT_SCSI_HOST *hd =
 		(struct _MPT_SCSI_HOST *)sdev->host->hostdata;
 	VirtTarget *vtarget = scsi_target(sdev)->hostdata;
-	int ret = mptscsih_slave_configure(sdev);
+	int ret;
+
+	mptspi_initTarget(hd, vtarget, sdev);
+
+	ret = mptscsih_slave_configure(sdev);
 
 	if (ret)
 		return ret;
 
-	mptspi_initTarget(hd, vtarget, sdev);
-
 	ddvprintk((MYIOC_s_INFO_FMT "id=%d min_period=0x%02x"
 		" max_offset=0x%02x max_width=%d\n", hd->ioc->name,
 		sdev->id, spi_min_period(scsi_target(sdev)),
Lines 2929-2939 endif #NETDEVICES Link Here
2929
config NETPOLL
2929
config NETPOLL
2930
	def_bool NETCONSOLE
2930
	def_bool NETCONSOLE
2931
2931
2932
config NETPOLL_RX
2933
	bool "Netpoll support for trapping incoming packets"
2934
	default n
2935
	depends on NETPOLL
2936
2937
config NETPOLL_TRAP
2932
config NETPOLL_TRAP
2938
	bool "Netpoll traffic trapping"
2933
	bool "Netpoll traffic trapping"
2939
	default n
2934
	default n
(-)a/drivers/net/bnx2.c (-4 / +11 lines)
Lines 54-61 Link Here
54
54
55
#define DRV_MODULE_NAME		"bnx2"
55
#define DRV_MODULE_NAME		"bnx2"
56
#define PFX DRV_MODULE_NAME	": "
56
#define PFX DRV_MODULE_NAME	": "
57
#define DRV_MODULE_VERSION	"1.5.8"
57
#define DRV_MODULE_VERSION	"1.5.8.1"
58
#define DRV_MODULE_RELDATE	"April 24, 2007"
58
#define DRV_MODULE_RELDATE	"May 7, 2007"
59
59
60
#define RUN_AT(x) (jiffies + (x))
60
#define RUN_AT(x) (jiffies + (x))
61
61
Lines 4510-4517 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) Link Here
4510
		vlan_tag_flags |=
4510
		vlan_tag_flags |=
4511
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4511
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4512
	}
4512
	}
4513
	if ((mss = skb_shinfo(skb)->gso_size) &&
4513
	if ((mss = skb_shinfo(skb)->gso_size)) {
4514
		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
4515
		u32 tcp_opt_len, ip_tcp_len;
4514
		u32 tcp_opt_len, ip_tcp_len;
4516
4515
4517
		if (skb_header_cloned(skb) &&
4516
		if (skb_header_cloned(skb) &&
Lines 5565-5570 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) Link Here
5565
	case SIOCGMIIREG: {
5564
	case SIOCGMIIREG: {
5566
		u32 mii_regval;
5565
		u32 mii_regval;
5567
5566
5567
		if (!netif_running(dev))
5568
			return -EAGAIN;
5569
5568
		spin_lock_bh(&bp->phy_lock);
5570
		spin_lock_bh(&bp->phy_lock);
5569
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5571
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5570
		spin_unlock_bh(&bp->phy_lock);
5572
		spin_unlock_bh(&bp->phy_lock);
Lines 5578-5583 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) Link Here
5578
		if (!capable(CAP_NET_ADMIN))
5580
		if (!capable(CAP_NET_ADMIN))
5579
			return -EPERM;
5581
			return -EPERM;
5580
5582
5583
		if (!netif_running(dev))
5584
			return -EAGAIN;
5585
5581
		spin_lock_bh(&bp->phy_lock);
5586
		spin_lock_bh(&bp->phy_lock);
5582
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5587
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5583
		spin_unlock_bh(&bp->phy_lock);
5588
		spin_unlock_bh(&bp->phy_lock);
Lines 6143-6148 bnx2_suspend(struct pci_dev *pdev, pm_message_t state) Link Here
6143
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6148
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6144
	bnx2_reset_chip(bp, reset_code);
6149
	bnx2_reset_chip(bp, reset_code);
6145
	bnx2_free_skbs(bp);
6150
	bnx2_free_skbs(bp);
6151
	pci_save_state(pdev);
6146
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6152
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6147
	return 0;
6153
	return 0;
6148
}
6154
}
Lines 6156-6161 bnx2_resume(struct pci_dev *pdev) Link Here
6156
	if (!netif_running(dev))
6162
	if (!netif_running(dev))
6157
		return 0;
6163
		return 0;
6158
6164
6165
	pci_restore_state(pdev);
6159
	bnx2_set_power_state(bp, PCI_D0);
6166
	bnx2_set_power_state(bp, PCI_D0);
6160
	netif_device_attach(dev);
6167
	netif_device_attach(dev);
6161
	bnx2_init_nic(bp);
6168
	bnx2_init_nic(bp);
(-)a/drivers/net/sis900.c (-4 / +5 lines)
Lines 1754-1759 static int sis900_rx(struct net_device *net_dev)
 			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
 		} else {
 			struct sk_buff * skb;
+			struct sk_buff * rx_skb;
 
 			pci_unmap_single(sis_priv->pci_dev,
 				sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
Lines 1787-1796 static int sis900_rx(struct net_device *net_dev)
 			}
 
 			/* give the socket buffer to upper layers */
-			skb = sis_priv->rx_skbuff[entry];
-			skb_put(skb, rx_size);
-			skb->protocol = eth_type_trans(skb, net_dev);
-			netif_rx(skb);
+			rx_skb = sis_priv->rx_skbuff[entry];
+			skb_put(rx_skb, rx_size);
+			rx_skb->protocol = eth_type_trans(rx_skb, net_dev);
+			netif_rx(rx_skb);
 
 			/* some network statistics */
 			if ((rx_status & BCAST) == MCAST)
Lines 135-144 static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, Link Here
135
/* Wake on Lan only supported on Yukon chips with rev 1 or above */
135
/* Wake on Lan only supported on Yukon chips with rev 1 or above */
136
static u32 wol_supported(const struct skge_hw *hw)
136
static u32 wol_supported(const struct skge_hw *hw)
137
{
137
{
138
	if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev != 0)
138
	if (hw->chip_id == CHIP_ID_GENESIS)
139
		return WAKE_MAGIC | WAKE_PHY;
139
		return 0;
140
	else
140
141
	if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
141
		return 0;
142
		return 0;
143
144
	return WAKE_MAGIC | WAKE_PHY;
142
}
145
}
143
146
144
static u32 pci_wake_enabled(struct pci_dev *dev)
147
static u32 pci_wake_enabled(struct pci_dev *dev)
Lines 3583-3589 static struct net_device *skge_devinit(struct skge_hw *hw, int port, Link Here
3583
	skge->duplex = -1;
3586
	skge->duplex = -1;
3584
	skge->speed = -1;
3587
	skge->speed = -1;
3585
	skge->advertising = skge_supported_modes(hw);
3588
	skge->advertising = skge_supported_modes(hw);
3586
	skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0;
3589
3590
	if (pci_wake_enabled(hw->pdev))
3591
		skge->wol = wol_supported(hw) & WAKE_MAGIC;
3587
3592
3588
	hw->dev[port] = dev;
3593
	hw->dev[port] = dev;
3589
3594
Lines 3789-3794 static int skge_suspend(struct pci_dev *pdev, pm_message_t state) Link Here
3789
	struct skge_hw *hw  = pci_get_drvdata(pdev);
3794
	struct skge_hw *hw  = pci_get_drvdata(pdev);
3790
	int i, err, wol = 0;
3795
	int i, err, wol = 0;
3791
3796
3797
	if (!hw)
3798
		return 0;
3799
3792
	err = pci_save_state(pdev);
3800
	err = pci_save_state(pdev);
3793
	if (err)
3801
	if (err)
3794
		return err;
3802
		return err;
Lines 3817-3822 static int skge_resume(struct pci_dev *pdev) Link Here
3817
	struct skge_hw *hw  = pci_get_drvdata(pdev);
3825
	struct skge_hw *hw  = pci_get_drvdata(pdev);
3818
	int i, err;
3826
	int i, err;
3819
3827
3828
	if (!hw)
3829
		return 0;
3830
3820
	err = pci_set_power_state(pdev, PCI_D0);
3831
	err = pci_set_power_state(pdev, PCI_D0);
3821
	if (err)
3832
	if (err)
3822
		goto out;
3833
		goto out;
Lines 3855-3860 static void skge_shutdown(struct pci_dev *pdev) Link Here
3855
	struct skge_hw *hw  = pci_get_drvdata(pdev);
3866
	struct skge_hw *hw  = pci_get_drvdata(pdev);
3856
	int i, wol = 0;
3867
	int i, wol = 0;
3857
3868
3869
	if (!hw)
3870
		return;
3871
3858
	for (i = 0; i < hw->ports; i++) {
3872
	for (i = 0; i < hw->ports; i++) {
3859
		struct net_device *dev = hw->dev[i];
3873
		struct net_device *dev = hw->dev[i];
3860
		struct skge_port *skge = netdev_priv(dev);
3874
		struct skge_port *skge = netdev_priv(dev);
(-)a/drivers/net/sky2.c (-4 / +11 lines)
Lines 123-138 static const struct pci_device_id sky2_id_table[] = { Link Here
123
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
123
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
124
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
124
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
125
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
125
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
126
#ifdef broken
127
	/* This device causes data corruption problems that are not resolved */
128
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
126
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
129
#endif
130
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
127
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
131
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
128
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
132
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
129
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
133
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
130
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
134
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
131
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
135
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
132
//	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
136
	{ 0 }
133
	{ 0 }
137
};
134
};
138
135
Lines 3722-3727 err_out_free_regions: Link Here
3722
	pci_release_regions(pdev);
3719
	pci_release_regions(pdev);
3723
	pci_disable_device(pdev);
3720
	pci_disable_device(pdev);
3724
err_out:
3721
err_out:
3722
	pci_set_drvdata(pdev, NULL);
3725
	return err;
3723
	return err;
3726
}
3724
}
3727
3725
Lines 3774-3779 static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) Link Here
3774
	struct sky2_hw *hw = pci_get_drvdata(pdev);
3772
	struct sky2_hw *hw = pci_get_drvdata(pdev);
3775
	int i, wol = 0;
3773
	int i, wol = 0;
3776
3774
3775
	if (!hw)
3776
		return 0;
3777
3777
	del_timer_sync(&hw->idle_timer);
3778
	del_timer_sync(&hw->idle_timer);
3778
	netif_poll_disable(hw->dev[0]);
3779
	netif_poll_disable(hw->dev[0]);
3779
3780
Lines 3805-3810 static int sky2_resume(struct pci_dev *pdev) Link Here
3805
	struct sky2_hw *hw = pci_get_drvdata(pdev);
3806
	struct sky2_hw *hw = pci_get_drvdata(pdev);
3806
	int i, err;
3807
	int i, err;
3807
3808
3809
	if (!hw)
3810
		return 0;
3811
3808
	err = pci_set_power_state(pdev, PCI_D0);
3812
	err = pci_set_power_state(pdev, PCI_D0);
3809
	if (err)
3813
	if (err)
3810
		goto out;
3814
		goto out;
Lines 3851-3856 static void sky2_shutdown(struct pci_dev *pdev) Link Here
3851
	struct sky2_hw *hw = pci_get_drvdata(pdev);
3855
	struct sky2_hw *hw = pci_get_drvdata(pdev);
3852
	int i, wol = 0;
3856
	int i, wol = 0;
3853
3857
3858
	if (!hw)
3859
		return;
3860
3854
	del_timer_sync(&hw->idle_timer);
3861
	del_timer_sync(&hw->idle_timer);
3855
	netif_poll_disable(hw->dev[0]);
3862
	netif_poll_disable(hw->dev[0]);
3856
3863
(-)a/drivers/net/smc911x.c (-1 / +1 lines)
Lines 499-505 static inline void smc911x_rcv(struct net_device *dev)
 		SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
 		SMC_PULL_DATA(data, pkt_len+2+3);
 
-		DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name,);
+		DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
 		PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
 		dev->last_rx = jiffies;
 		skb->dev = dev;
(-)a/drivers/net/tg3.c (-25 / +23 lines)
Lines 64-71
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.75"
-#define DRV_MODULE_RELDATE	"March 23, 2007"
+#define DRV_MODULE_VERSION	"3.75.1"
+#define DRV_MODULE_RELDATE	"May 7, 2007"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
Lines 3895-3902 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	entry = tp->tx_prod;
 	base_flags = 0;
 	mss = 0;
-	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
-	    (mss = skb_shinfo(skb)->gso_size) != 0) {
+	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
 		int tcp_opt_len, ip_tcp_len;
 
 		if (skb_header_cloned(skb) &&
Lines 4053-4060 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
 	mss = 0;
-	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
-	    (mss = skb_shinfo(skb)->gso_size) != 0) {
+	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
 		int tcp_opt_len, ip_tcp_len, hdr_len;
 
 		if (skb_header_cloned(skb) &&
Lines 5936-5942 static int tg3_load_tso_firmware(struct tg3 *tp)
 
 
 /* tp->lock is held. */
-static void __tg3_set_mac_addr(struct tg3 *tp)
+static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
 {
 	u32 addr_high, addr_low;
 	int i;
Lines 5948-5953 static void __tg3_set_mac_addr(struct tg3 *tp)
 		    (tp->dev->dev_addr[4] <<  8) |
 		    (tp->dev->dev_addr[5] <<  0));
 	for (i = 0; i < 4; i++) {
+		if (i == 1 && skip_mac_1)
+			continue;
 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
 	}
Lines 5974-5980 static int tg3_set_mac_addr(struct net_device *dev, void *p)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	struct sockaddr *addr = p;
-	int err = 0;
+	int err = 0, skip_mac_1 = 0;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EINVAL;
Lines 5985-6006 static int tg3_set_mac_addr(struct net_device *dev, void *p)
 		return 0;
 
 	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
-		/* Reset chip so that ASF can re-init any MAC addresses it
-		 * needs.
-		 */
-		tg3_netif_stop(tp);
-		tg3_full_lock(tp, 1);
-
-		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-		err = tg3_restart_hw(tp, 0);
-		if (!err)
-			tg3_netif_start(tp);
-		tg3_full_unlock(tp);
-	} else {
-		spin_lock_bh(&tp->lock);
-		__tg3_set_mac_addr(tp);
-		spin_unlock_bh(&tp->lock);
+		u32 addr0_high, addr0_low, addr1_high, addr1_low;
+
+		addr0_high = tr32(MAC_ADDR_0_HIGH);
+		addr0_low = tr32(MAC_ADDR_0_LOW);
+		addr1_high = tr32(MAC_ADDR_1_HIGH);
+		addr1_low = tr32(MAC_ADDR_1_LOW);
+
+		/* Skip MAC addr 1 if ASF is using it. */
+		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
+		    !(addr1_high == 0 && addr1_low == 0))
+			skip_mac_1 = 1;
 	}
+	spin_lock_bh(&tp->lock);
+	__tg3_set_mac_addr(tp, skip_mac_1);
+	spin_unlock_bh(&tp->lock);
 
 	return err;
 }
Lines 6317-6323 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 		     tp->rx_jumbo_ptr);
 
 	/* Initialize MAC address and backoff seed. */
-	__tg3_set_mac_addr(tp);
+	__tg3_set_mac_addr(tp, 0);
 
 	/* MTU + ethernet header + FCS + optional VLAN tag */
 	tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
Lines 6348-6355 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
 		if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
-		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
-		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
 			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
Lines 1737-1754 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, Link Here
1737
			quirk_nvidia_ck804_pcie_aer_ext_cap);
1737
			quirk_nvidia_ck804_pcie_aer_ext_cap);
1738
1738
1739
#ifdef CONFIG_PCI_MSI
1739
#ifdef CONFIG_PCI_MSI
1740
/* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely
1740
/* Some chipsets do not support MSI. We cannot easily rely on setting
1741
 * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
1741
 * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
1742
 * some other busses controlled by the chipset even if Linux is not aware of it.
1742
 * some other busses controlled by the chipset even if Linux is not
1743
 * Instead of setting the flag on all busses in the machine, simply disable MSI
1743
 * aware of it.  Instead of setting the flag on all busses in the
1744
 * globally.
1744
 * machine, simply disable MSI globally.
1745
 */
1745
 */
1746
static void __init quirk_svw_msi(struct pci_dev *dev)
1746
static void __init quirk_disable_all_msi(struct pci_dev *dev)
1747
{
1747
{
1748
	pci_no_msi();
1748
	pci_no_msi();
1749
	printk(KERN_WARNING "PCI: MSI quirk detected. MSI deactivated.\n");
1749
	printk(KERN_WARNING "PCI: MSI quirk detected. MSI deactivated.\n");
1750
}
1750
}
1751
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi);
1751
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
1752
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
1753
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
1752
1754
1753
/* Disable MSI on chipsets that are known to not support it */
1755
/* Disable MSI on chipsets that are known to not support it */
1754
static void __devinit quirk_disable_msi(struct pci_dev *dev)
1756
static void __devinit quirk_disable_msi(struct pci_dev *dev)
(-)a/drivers/serial/sunhv.c (+4 lines)
Lines 493-498 static struct of_device_id hv_match[] = { Link Here
493
		.name = "console",
493
		.name = "console",
494
		.compatible = "qcn",
494
		.compatible = "qcn",
495
	},
495
	},
496
	{
497
		.name = "console",
498
		.compatible = "SUNW,sun4v-console",
499
	},
496
	{},
500
	{},
497
};
501
};
498
MODULE_DEVICE_TABLE(of, hv_match);
502
MODULE_DEVICE_TABLE(of, hv_match);
(-)a/drivers/usb/atm/cxacru.c (-4 / +36 lines)
Lines 146-151 enum cxacru_info_idx { Link Here
146
	CXINF_MAX = 0x1c,
146
	CXINF_MAX = 0x1c,
147
};
147
};
148
148
149
enum poll_state {
150
	CX_INIT,
151
	CX_POLLING,
152
	CX_ABORT
153
};
154
149
struct cxacru_modem_type {
155
struct cxacru_modem_type {
150
	u32 pll_f_clk;
156
	u32 pll_f_clk;
151
	u32 pll_b_clk;
157
	u32 pll_b_clk;
Lines 159-164 struct cxacru_data { Link Here
159
165
160
	int line_status;
166
	int line_status;
161
	struct delayed_work poll_work;
167
	struct delayed_work poll_work;
168
	struct mutex poll_state_serialize;
169
	enum poll_state poll_state;
162
170
163
	/* contol handles */
171
	/* contol handles */
164
	struct mutex cm_serialize;
172
	struct mutex cm_serialize;
Lines 356-362 static int cxacru_atm_start(struct usbatm_data *usbatm_instance, Link Here
356
	/*
364
	/*
357
	struct atm_dev *atm_dev = usbatm_instance->atm_dev;
365
	struct atm_dev *atm_dev = usbatm_instance->atm_dev;
358
	*/
366
	*/
359
	int ret;
367
	int ret, start_polling = 1;
360
368
361
	dbg("cxacru_atm_start");
369
	dbg("cxacru_atm_start");
362
370
Lines 376-382 static int cxacru_atm_start(struct usbatm_data *usbatm_instance, Link Here
376
	}
384
	}
377
385
378
	/* Start status polling */
386
	/* Start status polling */
379
	cxacru_poll_status(&instance->poll_work.work);
387
	mutex_lock(&instance->poll_state_serialize);
388
	if (instance->poll_state == CX_INIT)
389
		instance->poll_state = CX_POLLING;
390
	else /* poll_state == CX_ABORT */
391
		start_polling = 0;
392
	mutex_unlock(&instance->poll_state_serialize);
393
394
	if (start_polling)
395
		cxacru_poll_status(&instance->poll_work.work);
380
	return 0;
396
	return 0;
381
}
397
}
382
398
Lines 685-690 static int cxacru_bind(struct usbatm_data *usbatm_instance, Link Here
685
	instance->usbatm = usbatm_instance;
701
	instance->usbatm = usbatm_instance;
686
	instance->modem_type = (struct cxacru_modem_type *) id->driver_info;
702
	instance->modem_type = (struct cxacru_modem_type *) id->driver_info;
687
703
704
	mutex_init(&instance->poll_state_serialize);
705
	instance->poll_state = CX_INIT;
706
688
	instance->rcv_buf = (u8 *) __get_free_page(GFP_KERNEL);
707
	instance->rcv_buf = (u8 *) __get_free_page(GFP_KERNEL);
689
	if (!instance->rcv_buf) {
708
	if (!instance->rcv_buf) {
690
		dbg("cxacru_bind: no memory for rcv_buf");
709
		dbg("cxacru_bind: no memory for rcv_buf");
Lines 744-749 static void cxacru_unbind(struct usbatm_data *usbatm_instance, Link Here
744
		struct usb_interface *intf)
763
		struct usb_interface *intf)
745
{
764
{
746
	struct cxacru_data *instance = usbatm_instance->driver_data;
765
	struct cxacru_data *instance = usbatm_instance->driver_data;
766
	int stop_polling = 1;
747
767
748
	dbg("cxacru_unbind entered");
768
	dbg("cxacru_unbind entered");
749
769
Lines 752-759 static void cxacru_unbind(struct usbatm_data *usbatm_instance, Link Here
752
		return;
772
		return;
753
	}
773
	}
754
774
755
	while (!cancel_delayed_work(&instance->poll_work))
775
	mutex_lock(&instance->poll_state_serialize);
756
	       flush_scheduled_work();
776
	if (instance->poll_state != CX_POLLING) {
777
		/* Polling hasn't started yet and with
778
		 * the mutex locked it can be prevented
779
		 * from starting.
780
		 */
781
		instance->poll_state = CX_ABORT;
782
		stop_polling = 0;
783
	}
784
	mutex_unlock(&instance->poll_state_serialize);
785
786
	if (stop_polling)
787
		while (!cancel_delayed_work(&instance->poll_work))
788
			flush_scheduled_work();
757
789
758
	usb_kill_urb(instance->snd_urb);
790
	usb_kill_urb(instance->snd_urb);
759
	usb_kill_urb(instance->rcv_urb);
791
	usb_kill_urb(instance->rcv_urb);
(-)a/drivers/usb/input/hiddev.c (+14 lines)
Lines 51-56 struct hiddev { Link Here
51
	wait_queue_head_t wait;
51
	wait_queue_head_t wait;
52
	struct hid_device *hid;
52
	struct hid_device *hid;
53
	struct list_head list;
53
	struct list_head list;
54
	spinlock_t list_lock;
54
};
55
};
55
56
56
struct hiddev_list {
57
struct hiddev_list {
Lines 161-167 static void hiddev_send_event(struct hid_device *hid, Link Here
161
{
162
{
162
	struct hiddev *hiddev = hid->hiddev;
163
	struct hiddev *hiddev = hid->hiddev;
163
	struct hiddev_list *list;
164
	struct hiddev_list *list;
165
	unsigned long flags;
164
166
167
	spin_lock_irqsave(&hiddev->list_lock, flags);
165
	list_for_each_entry(list, &hiddev->list, node) {
168
	list_for_each_entry(list, &hiddev->list, node) {
166
		if (uref->field_index != HID_FIELD_INDEX_NONE ||
169
		if (uref->field_index != HID_FIELD_INDEX_NONE ||
167
		    (list->flags & HIDDEV_FLAG_REPORT) != 0) {
170
		    (list->flags & HIDDEV_FLAG_REPORT) != 0) {
Lines 171-176 static void hiddev_send_event(struct hid_device *hid, Link Here
171
			kill_fasync(&list->fasync, SIGIO, POLL_IN);
174
			kill_fasync(&list->fasync, SIGIO, POLL_IN);
172
		}
175
		}
173
	}
176
	}
177
	spin_unlock_irqrestore(&hiddev->list_lock, flags);
174
178
175
	wake_up_interruptible(&hiddev->wait);
179
	wake_up_interruptible(&hiddev->wait);
176
}
180
}
Lines 235-243 static int hiddev_fasync(int fd, struct file *file, int on) Link Here
235
static int hiddev_release(struct inode * inode, struct file * file)
239
static int hiddev_release(struct inode * inode, struct file * file)
236
{
240
{
237
	struct hiddev_list *list = file->private_data;
241
	struct hiddev_list *list = file->private_data;
242
	unsigned long flags;
238
243
239
	hiddev_fasync(-1, file, 0);
244
	hiddev_fasync(-1, file, 0);
245
246
	spin_lock_irqsave(&list->hiddev->list_lock, flags);
240
	list_del(&list->node);
247
	list_del(&list->node);
248
	spin_unlock_irqrestore(&list->hiddev->list_lock, flags);
241
249
242
	if (!--list->hiddev->open) {
250
	if (!--list->hiddev->open) {
243
		if (list->hiddev->exist)
251
		if (list->hiddev->exist)
Lines 257-262 static int hiddev_release(struct inode * inode, struct file * file) Link Here
257
static int hiddev_open(struct inode *inode, struct file *file)
265
static int hiddev_open(struct inode *inode, struct file *file)
258
{
266
{
259
	struct hiddev_list *list;
267
	struct hiddev_list *list;
268
	unsigned long flags;
260
269
261
	int i = iminor(inode) - HIDDEV_MINOR_BASE;
270
	int i = iminor(inode) - HIDDEV_MINOR_BASE;
262
271
Lines 267-273 static int hiddev_open(struct inode *inode, struct file *file) Link Here
267
		return -ENOMEM;
276
		return -ENOMEM;
268
277
269
	list->hiddev = hiddev_table[i];
278
	list->hiddev = hiddev_table[i];
279
280
	spin_lock_irqsave(&list->hiddev->list_lock, flags);
270
	list_add_tail(&list->node, &hiddev_table[i]->list);
281
	list_add_tail(&list->node, &hiddev_table[i]->list);
282
	spin_unlock_irqrestore(&list->hiddev->list_lock, flags);
283
271
	file->private_data = list;
284
	file->private_data = list;
272
285
273
	if (!list->hiddev->open++)
286
	if (!list->hiddev->open++)
Lines 773-778 int hiddev_connect(struct hid_device *hid) Link Here
773
786
774
	init_waitqueue_head(&hiddev->wait);
787
	init_waitqueue_head(&hiddev->wait);
775
	INIT_LIST_HEAD(&hiddev->list);
788
	INIT_LIST_HEAD(&hiddev->list);
789
	spin_lock_init(&hiddev->list_lock);
776
	hiddev->hid = hid;
790
	hiddev->hid = hid;
777
	hiddev->exist = 1;
791
	hiddev->exist = 1;
778
792
(-)a/fs/fat/dir.c (-99 / +100 lines)
Lines 422-428 EODir:
 EXPORT_SYMBOL_GPL(fat_search_long);
 
 struct fat_ioctl_filldir_callback {
-	struct dirent __user *dirent;
+	void __user *dirent;
 	int result;
 	/* for dir ioctl */
 	const char *longname;
Lines 647-708 static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	return __fat_readdir(inode, filp, dirent, filldir, 0, 0);
 }
 
-static int fat_ioctl_filldir(void *__buf, const char *name, int name_len,
-			     loff_t offset, u64 ino, unsigned int d_type)
-{
-	struct fat_ioctl_filldir_callback *buf = __buf;
-	struct dirent __user *d1 = buf->dirent;
-	struct dirent __user *d2 = d1 + 1;
-
-	if (buf->result)
-		return -EINVAL;
-	buf->result++;
-
-	if (name != NULL) {
-		/* dirent has only short name */
-		if (name_len >= sizeof(d1->d_name))
-			name_len = sizeof(d1->d_name) - 1;
-
-		if (put_user(0, d2->d_name)			||
-		    put_user(0, &d2->d_reclen)			||
-		    copy_to_user(d1->d_name, name, name_len)	||
-		    put_user(0, d1->d_name + name_len)		||
-		    put_user(name_len, &d1->d_reclen))
-			goto efault;
-	} else {
-		/* dirent has short and long name */
-		const char *longname = buf->longname;
-		int long_len = buf->long_len;
-		const char *shortname = buf->shortname;
-		int short_len = buf->short_len;
-
-		if (long_len >= sizeof(d1->d_name))
-			long_len = sizeof(d1->d_name) - 1;
-		if (short_len >= sizeof(d1->d_name))
-			short_len = sizeof(d1->d_name) - 1;
-
-		if (copy_to_user(d2->d_name, longname, long_len)	||
-		    put_user(0, d2->d_name + long_len)			||
-		    put_user(long_len, &d2->d_reclen)			||
-		    put_user(ino, &d2->d_ino)				||
-		    put_user(offset, &d2->d_off)			||
-		    copy_to_user(d1->d_name, shortname, short_len)	||
-		    put_user(0, d1->d_name + short_len)			||
-		    put_user(short_len, &d1->d_reclen))
-			goto efault;
-	}
-	return 0;
-efault:
-	buf->result = -EFAULT;
-	return -EFAULT;
-}
-
-static int fat_dir_ioctl(struct inode * inode, struct file * filp,
-		  unsigned int cmd, unsigned long arg)
-{
-	struct fat_ioctl_filldir_callback buf;
-	struct dirent __user *d1;
-	int ret, short_only, both;
+#define FAT_IOCTL_FILLDIR_FUNC(func, dirent_type)			   \
+static int func(void *__buf, const char *name, int name_len,		   \
+			     loff_t offset, u64 ino, unsigned int d_type)  \
+{									   \
+	struct fat_ioctl_filldir_callback *buf = __buf;			   \
+	struct dirent_type __user *d1 = buf->dirent;			   \
+	struct dirent_type __user *d2 = d1 + 1;				   \
+									   \
+	if (buf->result)						   \
+		return -EINVAL;						   \
+	buf->result++;							   \
+									   \
+	if (name != NULL) {						   \
+		/* dirent has only short name */			   \
+		if (name_len >= sizeof(d1->d_name))			   \
+			name_len = sizeof(d1->d_name) - 1;		   \
+									   \
+		if (put_user(0, d2->d_name)			||	   \
+		    put_user(0, &d2->d_reclen)			||	   \
+		    copy_to_user(d1->d_name, name, name_len)	||	   \
+		    put_user(0, d1->d_name + name_len)		||	   \
+		    put_user(name_len, &d1->d_reclen))			   \
+			goto efault;					   \
+	} else {							   \
+		/* dirent has short and long name */			   \
+		const char *longname = buf->longname;			   \
+		int long_len = buf->long_len;				   \
+		const char *shortname = buf->shortname;			   \
+		int short_len = buf->short_len;				   \
+									   \
+		if (long_len >= sizeof(d1->d_name))			   \
+			long_len = sizeof(d1->d_name) - 1;		   \
+		if (short_len >= sizeof(d1->d_name))			   \
+			short_len = sizeof(d1->d_name) - 1;		   \
+									   \
+		if (copy_to_user(d2->d_name, longname, long_len)	|| \
+		    put_user(0, d2->d_name + long_len)			|| \
+		    put_user(long_len, &d2->d_reclen)			|| \
+		    put_user(ino, &d2->d_ino)				|| \
+		    put_user(offset, &d2->d_off)			|| \
+		    copy_to_user(d1->d_name, shortname, short_len)	|| \
+		    put_user(0, d1->d_name + short_len)			|| \
+		    put_user(short_len, &d1->d_reclen))			   \
+			goto efault;					   \
+	}								   \
+	return 0;							   \
+efault:									   \
+	buf->result = -EFAULT;						   \
+	return -EFAULT;							   \
+}
+
+FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, dirent)
+
+static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
+			     void __user *dirent, filldir_t filldir,
+			     int short_only, int both)
+{
+	struct fat_ioctl_filldir_callback buf;
+	int ret;
+
+	buf.dirent = dirent;
+	buf.result = 0;
+	mutex_lock(&inode->i_mutex);
+	ret = -ENOENT;
+	if (!IS_DEADDIR(inode)) {
+		ret = __fat_readdir(inode, filp, &buf, filldir,
+				    short_only, both);
+	}
+	mutex_unlock(&inode->i_mutex);
+	if (ret >= 0)
+		ret = buf.result;
+	return ret;
+}
+
+static int fat_dir_ioctl(struct inode *inode, struct file *filp,
+			 unsigned int cmd, unsigned long arg)
+{
+	struct dirent __user *d1 = (struct dirent __user *)arg;
+	int short_only, both;
 
 	switch (cmd) {
 	case VFAT_IOCTL_READDIR_SHORT:
Lines 717-723 static int fat_dir_ioctl(struct inode * inode, struct file * filp,
 		return fat_generic_ioctl(inode, filp, cmd, arg);
 	}
 
-	d1 = (struct dirent __user *)arg;
 	if (!access_ok(VERIFY_WRITE, d1, sizeof(struct dirent[2])))
 		return -EFAULT;
 	/*
Lines 728-796 static int fat_dir_ioctl(struct inode * inode, struct file * filp,
 	if (put_user(0, &d1->d_reclen))
 		return -EFAULT;
 
-	buf.dirent = d1;
-	buf.result = 0;
-	mutex_lock(&inode->i_mutex);
-	ret = -ENOENT;
-	if (!IS_DEADDIR(inode)) {
-		ret = __fat_readdir(inode, filp, &buf, fat_ioctl_filldir,
-				    short_only, both);
-	}
-	mutex_unlock(&inode->i_mutex);
-	if (ret >= 0)
-		ret = buf.result;
-	return ret;
+	return fat_ioctl_readdir(inode, filp, d1, fat_ioctl_filldir,
+				 short_only, both);
 }
 
 #ifdef CONFIG_COMPAT
 #define	VFAT_IOCTL_READDIR_BOTH32	_IOR('r', 1, struct compat_dirent[2])
 #define	VFAT_IOCTL_READDIR_SHORT32	_IOR('r', 2, struct compat_dirent[2])
 
-static long fat_compat_put_dirent32(struct dirent *d,
-				    struct compat_dirent __user *d32)
-{
-        if (!access_ok(VERIFY_WRITE, d32, sizeof(struct compat_dirent)))
-                return -EFAULT;
-
-        __put_user(d->d_ino, &d32->d_ino);
-        __put_user(d->d_off, &d32->d_off);
-        __put_user(d->d_reclen, &d32->d_reclen);
-        if (__copy_to_user(d32->d_name, d->d_name, d->d_reclen))
-		return -EFAULT;
-
-        return 0;
-}
+FAT_IOCTL_FILLDIR_FUNC(fat_compat_ioctl_filldir, compat_dirent)
 
-static long fat_compat_dir_ioctl(struct file *file, unsigned cmd,
+static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd,
 				 unsigned long arg)
 {
-	struct compat_dirent __user *p = compat_ptr(arg);
-	int ret;
-	mm_segment_t oldfs = get_fs();
-	struct dirent d[2];
+	struct inode *inode = filp->f_path.dentry->d_inode;
+	struct compat_dirent __user *d1 = compat_ptr(arg);
+	int short_only, both;
 
 	switch (cmd) {
-	case VFAT_IOCTL_READDIR_BOTH32:
-		cmd = VFAT_IOCTL_READDIR_BOTH;
-		break;
 	case VFAT_IOCTL_READDIR_SHORT32:
-		cmd = VFAT_IOCTL_READDIR_SHORT;
+		short_only = 1;
+		both = 0;
+		break;
+	case VFAT_IOCTL_READDIR_BOTH32:
+		short_only = 0;
+		both = 1;
 		break;
 	default:
 		return -ENOIOCTLCMD;
 	}
 
-	set_fs(KERNEL_DS);
-	lock_kernel();
-	ret = fat_dir_ioctl(file->f_path.dentry->d_inode, file,
-			    cmd, (unsigned long) &d);
-	unlock_kernel();
-	set_fs(oldfs);
+	if (!access_ok(VERIFY_WRITE, d1, sizeof(struct compat_dirent[2])))
+		return -EFAULT;
+	/*
+	 * Yes, we don't need this put_user() absolutely. However old
+	 * code didn't return the right value. So, app use this value,
788
	 * in order to check whether it is EOF.
789
	if (ret >= 0) {
789
	 */
790
		ret |= fat_compat_put_dirent32(&d[0], p);
790
	if (put_user(0, &d1->d_reclen))
791
		ret |= fat_compat_put_dirent32(&d[1], p + 1);
791
		return -EFAULT;
792
	}
792
793
	return ret;
793
	return fat_ioctl_readdir(inode, filp, d1, fat_compat_ioctl_filldir,
794
				 short_only, both);
794
}
795
}
795
#endif /* CONFIG_COMPAT */
796
#endif /* CONFIG_COMPAT */
796
797
(-)a/fs/jfs/jfs_logmgr.c (-1 / +2 lines)
Lines 2354-2365 int jfsIOWait(void *arg) Link Here
2354
			lbmStartIO(bp);
2354
			lbmStartIO(bp);
2355
			spin_lock_irq(&log_redrive_lock);
2355
			spin_lock_irq(&log_redrive_lock);
2356
		}
2356
		}
2357
		spin_unlock_irq(&log_redrive_lock);
2358
2357
2359
		if (freezing(current)) {
2358
		if (freezing(current)) {
2359
			spin_unlock_irq(&log_redrive_lock);
2360
			refrigerator();
2360
			refrigerator();
2361
		} else {
2361
		} else {
2362
			set_current_state(TASK_INTERRUPTIBLE);
2362
			set_current_state(TASK_INTERRUPTIBLE);
2363
			spin_unlock_irq(&log_redrive_lock);
2363
			schedule();
2364
			schedule();
2364
			current->state = TASK_RUNNING;
2365
			current->state = TASK_RUNNING;
2365
		}
2366
		}
(-)a/fs/nfsd/export.c (-7 / +7 lines)
Lines 469-474 static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) Link Here
469
	nd.dentry = NULL;
469
	nd.dentry = NULL;
470
	exp.ex_path = NULL;
470
	exp.ex_path = NULL;
471
471
472
	/* fs locations */
473
	exp.ex_fslocs.locations = NULL;
474
	exp.ex_fslocs.locations_count = 0;
475
	exp.ex_fslocs.migrated = 0;
476
477
	exp.ex_uuid = NULL;
478
472
	if (mesg[mlen-1] != '\n')
479
	if (mesg[mlen-1] != '\n')
473
		return -EINVAL;
480
		return -EINVAL;
474
	mesg[mlen-1] = 0;
481
	mesg[mlen-1] = 0;
Lines 509-521 static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) Link Here
509
	if (exp.h.expiry_time == 0)
516
	if (exp.h.expiry_time == 0)
510
		goto out;
517
		goto out;
511
518
512
	/* fs locations */
513
	exp.ex_fslocs.locations = NULL;
514
	exp.ex_fslocs.locations_count = 0;
515
	exp.ex_fslocs.migrated = 0;
516
517
	exp.ex_uuid = NULL;
518
519
	/* flags */
519
	/* flags */
520
	err = get_int(&mesg, &an_int);
520
	err = get_int(&mesg, &an_int);
521
	if (err == -ENOENT)
521
	if (err == -ENOENT)
(-)a/fs/reiserfs/xattr.c (-1 / +1 lines)
Lines 68-74 static struct dentry *get_xa_root(struct super_block *sb, int flags) Link Here
68
	if (!privroot)
68
	if (!privroot)
69
		return ERR_PTR(-ENODATA);
69
		return ERR_PTR(-ENODATA);
70
70
71
	mutex_lock(&privroot->d_inode->i_mutex);
71
	mutex_lock_nested(&privroot->d_inode->i_mutex, I_MUTEX_XATTR);
72
	if (REISERFS_SB(sb)->xattr_root) {
72
	if (REISERFS_SB(sb)->xattr_root) {
73
		xaroot = dget(REISERFS_SB(sb)->xattr_root);
73
		xaroot = dget(REISERFS_SB(sb)->xattr_root);
74
		goto out;
74
		goto out;
(-)a/fs/udf/namei.c (-1 / +1 lines)
Lines 878-884 static int udf_rmdir(struct inode * dir, struct dentry * dentry) Link Here
878
			inode->i_nlink);
878
			inode->i_nlink);
879
	clear_nlink(inode);
879
	clear_nlink(inode);
880
	inode->i_size = 0;
880
	inode->i_size = 0;
881
	inode_dec_link_count(inode);
881
	inode_dec_link_count(dir);
882
	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
882
	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
883
	mark_inode_dirty(dir);
883
	mark_inode_dirty(dir);
884
884
(-)a/include/asm-arm/arch-iop13xx/iop13xx.h (-9 / +13 lines)
Lines 27-45 static inline int iop13xx_cpu_id(void) Link Here
27
#define IOP13XX_PCI_OFFSET	 IOP13XX_MAX_RAM_SIZE
27
#define IOP13XX_PCI_OFFSET	 IOP13XX_MAX_RAM_SIZE
28
28
29
/* PCI MAP
29
/* PCI MAP
30
 * 0x0000.0000 - 0x8000.0000           1:1 mapping with Physical RAM
30
 * bus range		cpu phys	cpu virt	note
31
 * 0x8000.0000 - 0x8800.0000           PCIX/PCIE memory window (128MB)
31
 * 0x0000.0000 + 2GB	(n/a)		(n/a)		inbound, 1:1 mapping with Physical RAM
32
*/
32
 * 0x8000.0000 + 928M	0x1.8000.0000   (ioremap)	PCIX outbound memory window
33
 * 0x8000.0000 + 928M	0x2.8000.0000   (ioremap)	PCIE outbound memory window
34
 * 
35
 * IO MAP
36
 * 0x1000 + 64K	0x0.fffb.1000	0xfec6.1000	PCIX outbound i/o window
37
 * 0x1000 + 64K	0x0.fffd.1000	0xfed7.1000	PCIE outbound i/o window
38
 */
33
#define IOP13XX_PCIX_IO_WINDOW_SIZE   0x10000UL
39
#define IOP13XX_PCIX_IO_WINDOW_SIZE   0x10000UL
34
#define IOP13XX_PCIX_LOWER_IO_PA      0xfffb0000UL
40
#define IOP13XX_PCIX_LOWER_IO_PA      0xfffb0000UL
35
#define IOP13XX_PCIX_LOWER_IO_VA      0xfec60000UL
41
#define IOP13XX_PCIX_LOWER_IO_VA      0xfec60000UL
36
#define IOP13XX_PCIX_LOWER_IO_BA      0x0fff0000UL
42
#define IOP13XX_PCIX_LOWER_IO_BA      0x0UL /* OIOTVR */
43
#define IOP13XX_PCIX_IO_BUS_OFFSET    0x1000UL
37
#define IOP13XX_PCIX_UPPER_IO_PA      (IOP13XX_PCIX_LOWER_IO_PA +\
44
#define IOP13XX_PCIX_UPPER_IO_PA      (IOP13XX_PCIX_LOWER_IO_PA +\
38
				       IOP13XX_PCIX_IO_WINDOW_SIZE - 1)
45
				       IOP13XX_PCIX_IO_WINDOW_SIZE - 1)
39
#define IOP13XX_PCIX_UPPER_IO_VA      (IOP13XX_PCIX_LOWER_IO_VA +\
46
#define IOP13XX_PCIX_UPPER_IO_VA      (IOP13XX_PCIX_LOWER_IO_VA +\
40
				       IOP13XX_PCIX_IO_WINDOW_SIZE - 1)
47
				       IOP13XX_PCIX_IO_WINDOW_SIZE - 1)
41
#define IOP13XX_PCIX_IO_OFFSET        (IOP13XX_PCIX_LOWER_IO_VA -\
42
				       IOP13XX_PCIX_LOWER_IO_BA)
43
#define IOP13XX_PCIX_IO_PHYS_TO_VIRT(addr) (u32) ((u32) addr -\
48
#define IOP13XX_PCIX_IO_PHYS_TO_VIRT(addr) (u32) ((u32) addr -\
44
					   (IOP13XX_PCIX_LOWER_IO_PA\
49
					   (IOP13XX_PCIX_LOWER_IO_PA\
45
					   - IOP13XX_PCIX_LOWER_IO_VA))
50
					   - IOP13XX_PCIX_LOWER_IO_VA))
Lines 65-79 static inline int iop13xx_cpu_id(void) Link Here
65
#define IOP13XX_PCIE_IO_WINDOW_SIZE   	 0x10000UL
70
#define IOP13XX_PCIE_IO_WINDOW_SIZE   	 0x10000UL
66
#define IOP13XX_PCIE_LOWER_IO_PA      	 0xfffd0000UL
71
#define IOP13XX_PCIE_LOWER_IO_PA      	 0xfffd0000UL
67
#define IOP13XX_PCIE_LOWER_IO_VA      	 0xfed70000UL
72
#define IOP13XX_PCIE_LOWER_IO_VA      	 0xfed70000UL
68
#define IOP13XX_PCIE_LOWER_IO_BA      	 0x0fff0000UL
73
#define IOP13XX_PCIE_LOWER_IO_BA      	 0x0UL  /* OIOTVR */
74
#define IOP13XX_PCIE_IO_BUS_OFFSET	 0x1000UL
69
#define IOP13XX_PCIE_UPPER_IO_PA      	 (IOP13XX_PCIE_LOWER_IO_PA +\
75
#define IOP13XX_PCIE_UPPER_IO_PA      	 (IOP13XX_PCIE_LOWER_IO_PA +\
70
					 IOP13XX_PCIE_IO_WINDOW_SIZE - 1)
76
					 IOP13XX_PCIE_IO_WINDOW_SIZE - 1)
71
#define IOP13XX_PCIE_UPPER_IO_VA      	 (IOP13XX_PCIE_LOWER_IO_VA +\
77
#define IOP13XX_PCIE_UPPER_IO_VA      	 (IOP13XX_PCIE_LOWER_IO_VA +\
72
					 IOP13XX_PCIE_IO_WINDOW_SIZE - 1)
78
					 IOP13XX_PCIE_IO_WINDOW_SIZE - 1)
73
#define IOP13XX_PCIE_UPPER_IO_BA      	 (IOP13XX_PCIE_LOWER_IO_BA +\
79
#define IOP13XX_PCIE_UPPER_IO_BA      	 (IOP13XX_PCIE_LOWER_IO_BA +\
74
					 IOP13XX_PCIE_IO_WINDOW_SIZE - 1)
80
					 IOP13XX_PCIE_IO_WINDOW_SIZE - 1)
75
#define IOP13XX_PCIE_IO_OFFSET        	 (IOP13XX_PCIE_LOWER_IO_VA -\
76
					 IOP13XX_PCIE_LOWER_IO_BA)
77
#define IOP13XX_PCIE_IO_PHYS_TO_VIRT(addr) (u32) ((u32) addr -\
81
#define IOP13XX_PCIE_IO_PHYS_TO_VIRT(addr) (u32) ((u32) addr -\
78
					   (IOP13XX_PCIE_LOWER_IO_PA\
82
					   (IOP13XX_PCIE_LOWER_IO_PA\
79
					   - IOP13XX_PCIE_LOWER_IO_VA))
83
					   - IOP13XX_PCIE_LOWER_IO_VA))
(-)a/include/asm-sparc64/openprom.h (-1 / +1 lines)
Lines 177-183 struct linux_nodeops { Link Here
177
/* More fun PROM structures for device probing. */
177
/* More fun PROM structures for device probing. */
178
#define PROMREG_MAX     24
178
#define PROMREG_MAX     24
179
#define PROMVADDR_MAX   16
179
#define PROMVADDR_MAX   16
180
#define PROMINTR_MAX    15
180
#define PROMINTR_MAX    32
181
181
182
struct linux_prom_registers {
182
struct linux_prom_registers {
183
	unsigned which_io;	/* hi part of physical address			*/
183
	unsigned which_io;	/* hi part of physical address			*/
(-)a/include/linux/clocksource.h (+3 lines)
Lines 48-53 struct clocksource; Link Here
48
 * @shift:		cycle to nanosecond divisor (power of two)
48
 * @shift:		cycle to nanosecond divisor (power of two)
49
 * @flags:		flags describing special properties
49
 * @flags:		flags describing special properties
50
 * @vread:		vsyscall based read
50
 * @vread:		vsyscall based read
51
 * @resume:		resume function for the clocksource, if necessary
51
 * @cycle_interval:	Used internally by timekeeping core, please ignore.
52
 * @cycle_interval:	Used internally by timekeeping core, please ignore.
52
 * @xtime_interval:	Used internally by timekeeping core, please ignore.
53
 * @xtime_interval:	Used internally by timekeeping core, please ignore.
53
 */
54
 */
Lines 61-66 struct clocksource { Link Here
61
	u32 shift;
62
	u32 shift;
62
	unsigned long flags;
63
	unsigned long flags;
63
	cycle_t (*vread)(void);
64
	cycle_t (*vread)(void);
65
	void (*resume)(void);
64
66
65
	/* timekeeping specific data, ignore */
67
	/* timekeeping specific data, ignore */
66
	cycle_t cycle_last, cycle_interval;
68
	cycle_t cycle_last, cycle_interval;
Lines 198-203 static inline void clocksource_calculate_interval(struct clocksource *c, Link Here
198
extern int clocksource_register(struct clocksource*);
200
extern int clocksource_register(struct clocksource*);
199
extern struct clocksource* clocksource_get_next(void);
201
extern struct clocksource* clocksource_get_next(void);
200
extern void clocksource_change_rating(struct clocksource *cs, int rating);
202
extern void clocksource_change_rating(struct clocksource *cs, int rating);
203
extern void clocksource_resume(void);
201
204
202
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
205
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
203
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
206
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
(-)a/include/linux/netdevice.h (-5 / +3 lines)
Lines 647-654 static inline void netif_start_queue(struct net_device *dev) Link Here
647
static inline void netif_wake_queue(struct net_device *dev)
647
static inline void netif_wake_queue(struct net_device *dev)
648
{
648
{
649
#ifdef CONFIG_NETPOLL_TRAP
649
#ifdef CONFIG_NETPOLL_TRAP
650
	if (netpoll_trap())
650
	if (netpoll_trap()) {
651
		clear_bit(__LINK_STATE_XOFF, &dev->state);
651
		return;
652
		return;
653
	}
652
#endif
654
#endif
653
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
655
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
654
		__netif_schedule(dev);
656
		__netif_schedule(dev);
Lines 656-665 static inline void netif_wake_queue(struct net_device *dev) Link Here
656
658
657
static inline void netif_stop_queue(struct net_device *dev)
659
static inline void netif_stop_queue(struct net_device *dev)
658
{
660
{
659
#ifdef CONFIG_NETPOLL_TRAP
660
	if (netpoll_trap())
661
		return;
662
#endif
663
	set_bit(__LINK_STATE_XOFF, &dev->state);
661
	set_bit(__LINK_STATE_XOFF, &dev->state);
664
}
662
}
665
663
(-)a/include/linux/netfilter/nf_conntrack_proto_gre.h (-18 lines)
Lines 87-110 int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, Link Here
87
/* delete keymap entries */
87
/* delete keymap entries */
88
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
88
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
89
89
90
/* get pointer to gre key, if present */
91
static inline __be32 *gre_key(struct gre_hdr *greh)
92
{
93
	if (!greh->key)
94
		return NULL;
95
	if (greh->csum || greh->routing)
96
		return (__be32 *)(greh+sizeof(*greh)+4);
97
	return (__be32 *)(greh+sizeof(*greh));
98
}
99
100
/* get pointer ot gre csum, if present */
101
static inline __sum16 *gre_csum(struct gre_hdr *greh)
102
{
103
	if (!greh->csum)
104
		return NULL;
105
	return (__sum16 *)(greh+sizeof(*greh));
106
}
107
108
extern void nf_ct_gre_keymap_flush(void);
90
extern void nf_ct_gre_keymap_flush(void);
109
extern void nf_nat_need_gre(void);
91
extern void nf_nat_need_gre(void);
110
92
(-)a/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h (-19 lines)
Lines 90-114 int ip_ct_gre_keymap_add(struct ip_conntrack *ct, Link Here
90
/* delete keymap entries */
90
/* delete keymap entries */
91
void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct);
91
void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct);
92
92
93
94
/* get pointer to gre key, if present */
95
static inline __be32 *gre_key(struct gre_hdr *greh)
96
{
97
	if (!greh->key)
98
		return NULL;
99
	if (greh->csum || greh->routing)
100
		return (__be32 *) (greh+sizeof(*greh)+4);
101
	return (__be32 *) (greh+sizeof(*greh));
102
}
103
104
/* get pointer ot gre csum, if present */
105
static inline __sum16 *gre_csum(struct gre_hdr *greh)
106
{
107
	if (!greh->csum)
108
		return NULL;
109
	return (__sum16 *) (greh+sizeof(*greh));
110
}
111
112
#endif /* __KERNEL__ */
93
#endif /* __KERNEL__ */
113
94
114
#endif /* _CONNTRACK_PROTO_GRE_H */
95
#endif /* _CONNTRACK_PROTO_GRE_H */
(-)a/kernel/time/clocksource.c (+45 lines)
Lines 74-79 static struct clocksource *watchdog; Link Here
74
static struct timer_list watchdog_timer;
74
static struct timer_list watchdog_timer;
75
static DEFINE_SPINLOCK(watchdog_lock);
75
static DEFINE_SPINLOCK(watchdog_lock);
76
static cycle_t watchdog_last;
76
static cycle_t watchdog_last;
77
static int watchdog_resumed;
78
77
/*
79
/*
78
 * Interval: 0.5sec Treshold: 0.0625s
80
 * Interval: 0.5sec Treshold: 0.0625s
79
 */
81
 */
Lines 98-112 static void clocksource_watchdog(unsigned long data) Link Here
98
	struct clocksource *cs, *tmp;
100
	struct clocksource *cs, *tmp;
99
	cycle_t csnow, wdnow;
101
	cycle_t csnow, wdnow;
100
	int64_t wd_nsec, cs_nsec;
102
	int64_t wd_nsec, cs_nsec;
103
	int resumed;
101
104
102
	spin_lock(&watchdog_lock);
105
	spin_lock(&watchdog_lock);
103
106
107
	resumed = watchdog_resumed;
108
	if (unlikely(resumed))
109
		watchdog_resumed = 0;
110
104
	wdnow = watchdog->read();
111
	wdnow = watchdog->read();
105
	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
112
	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
106
	watchdog_last = wdnow;
113
	watchdog_last = wdnow;
107
114
108
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
115
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
109
		csnow = cs->read();
116
		csnow = cs->read();
117
118
		if (unlikely(resumed)) {
119
			cs->wd_last = csnow;
120
			continue;
121
		}
122
110
		/* Initialized ? */
123
		/* Initialized ? */
111
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
124
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
112
			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
125
			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
Lines 136-141 static void clocksource_watchdog(unsigned long data) Link Here
136
	}
149
	}
137
	spin_unlock(&watchdog_lock);
150
	spin_unlock(&watchdog_lock);
138
}
151
}
152
static void clocksource_resume_watchdog(void)
153
{
154
	spin_lock(&watchdog_lock);
155
	watchdog_resumed = 1;
156
	spin_unlock(&watchdog_lock);
157
}
158
139
static void clocksource_check_watchdog(struct clocksource *cs)
159
static void clocksource_check_watchdog(struct clocksource *cs)
140
{
160
{
141
	struct clocksource *cse;
161
	struct clocksource *cse;
Lines 182-190 static void clocksource_check_watchdog(struct clocksource *cs) Link Here
182
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
202
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
183
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
203
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
184
}
204
}
205
206
static inline void clocksource_resume_watchdog(void) { }
185
#endif
207
#endif
186
208
187
/**
209
/**
210
 * clocksource_resume - resume the clocksource(s)
211
 */
212
void clocksource_resume(void)
213
{
214
	struct list_head *tmp;
215
	unsigned long flags;
216
217
	spin_lock_irqsave(&clocksource_lock, flags);
218
219
	list_for_each(tmp, &clocksource_list) {
220
		struct clocksource *cs;
221
222
		cs = list_entry(tmp, struct clocksource, list);
223
		if (cs->resume)
224
			cs->resume();
225
	}
226
227
	clocksource_resume_watchdog();
228
229
	spin_unlock_irqrestore(&clocksource_lock, flags);
230
}
231
232
/**
188
 * clocksource_get_next - Returns the selected clocksource
233
 * clocksource_get_next - Returns the selected clocksource
189
 *
234
 *
190
 */
235
 */
(-)a/kernel/time/tick-common.c (-1 / +7 lines)
Lines 31-37 DEFINE_PER_CPU(struct tick_device, tick_cpu_device); Link Here
31
 */
31
 */
32
ktime_t tick_next_period;
32
ktime_t tick_next_period;
33
ktime_t tick_period;
33
ktime_t tick_period;
34
static int tick_do_timer_cpu = -1;
34
int tick_do_timer_cpu __read_mostly = -1;
35
DEFINE_SPINLOCK(tick_device_lock);
35
DEFINE_SPINLOCK(tick_device_lock);
36
36
37
/*
37
/*
Lines 295-300 static void tick_shutdown(unsigned int *cpup) Link Here
295
		clockevents_exchange_device(dev, NULL);
295
		clockevents_exchange_device(dev, NULL);
296
		td->evtdev = NULL;
296
		td->evtdev = NULL;
297
	}
297
	}
298
	/* Transfer the do_timer job away from this cpu */
299
	if (*cpup == tick_do_timer_cpu) {
300
		int cpu = first_cpu(cpu_online_map);
301
302
		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
303
	}
298
	spin_unlock_irqrestore(&tick_device_lock, flags);
304
	spin_unlock_irqrestore(&tick_device_lock, flags);
299
}
305
}
300
306
(-)a/kernel/time/tick-internal.h (+1 lines)
Lines 5-10 DECLARE_PER_CPU(struct tick_device, tick_cpu_device); Link Here
5
extern spinlock_t tick_device_lock;
5
extern spinlock_t tick_device_lock;
6
extern ktime_t tick_next_period;
6
extern ktime_t tick_next_period;
7
extern ktime_t tick_period;
7
extern ktime_t tick_period;
8
extern int tick_do_timer_cpu __read_mostly;
8
9
9
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
10
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
10
extern void tick_handle_periodic(struct clock_event_device *dev);
11
extern void tick_handle_periodic(struct clock_event_device *dev);
(-)a/kernel/time/tick-sched.c (-2 / +40 lines)
Lines 221-226 void tick_nohz_stop_sched_tick(void) Link Here
221
			ts->tick_stopped = 1;
221
			ts->tick_stopped = 1;
222
			ts->idle_jiffies = last_jiffies;
222
			ts->idle_jiffies = last_jiffies;
223
		}
223
		}
224
225
		/*
226
		 * If this cpu is the one which updates jiffies, then
227
		 * give up the assignment and let it be taken by the
228
		 * cpu which runs the tick timer next, which might be
229
		 * this cpu as well. If we don't drop this here the
230
		 * jiffies might be stale and do_timer() never
231
		 * invoked.
232
		 */
233
		if (cpu == tick_do_timer_cpu)
234
			tick_do_timer_cpu = -1;
235
224
		/*
236
		/*
225
		 * calculate the expiry time for the next timer wheel
237
		 * calculate the expiry time for the next timer wheel
226
		 * timer
238
		 * timer
Lines 338-349 static void tick_nohz_handler(struct clock_event_device *dev) Link Here
338
{
350
{
339
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
351
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
340
	struct pt_regs *regs = get_irq_regs();
352
	struct pt_regs *regs = get_irq_regs();
353
	int cpu = smp_processor_id();
341
	ktime_t now = ktime_get();
354
	ktime_t now = ktime_get();
342
355
343
	dev->next_event.tv64 = KTIME_MAX;
356
	dev->next_event.tv64 = KTIME_MAX;
344
357
358
	/*
359
	 * Check if the do_timer duty was dropped. We don't care about
360
	 * concurrency: This happens only when the cpu in charge went
361
	 * into a long sleep. If two cpus happen to assign themself to
362
	 * this duty, then the jiffies update is still serialized by
363
	 * xtime_lock.
364
	 */
365
	if (unlikely(tick_do_timer_cpu == -1))
366
		tick_do_timer_cpu = cpu;
367
345
	/* Check, if the jiffies need an update */
368
	/* Check, if the jiffies need an update */
346
	tick_do_update_jiffies64(now);
369
	if (tick_do_timer_cpu == cpu)
370
		tick_do_update_jiffies64(now);
347
371
348
	/*
372
	/*
349
	 * When we are idle and the tick is stopped, we have to touch
373
	 * When we are idle and the tick is stopped, we have to touch
Lines 431-439 static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) Link Here
431
	struct hrtimer_cpu_base *base = timer->base->cpu_base;
455
	struct hrtimer_cpu_base *base = timer->base->cpu_base;
432
	struct pt_regs *regs = get_irq_regs();
456
	struct pt_regs *regs = get_irq_regs();
433
	ktime_t now = ktime_get();
457
	ktime_t now = ktime_get();
458
	int cpu = smp_processor_id();
459
460
#ifdef CONFIG_NO_HZ
461
	/*
462
	 * Check if the do_timer duty was dropped. We don't care about
463
	 * concurrency: This happens only when the cpu in charge went
464
	 * into a long sleep. If two cpus happen to assign themself to
465
	 * this duty, then the jiffies update is still serialized by
466
	 * xtime_lock.
467
	 */
468
	if (unlikely(tick_do_timer_cpu == -1))
469
		tick_do_timer_cpu = cpu;
470
#endif
434
471
435
	/* Check, if the jiffies need an update */
472
	/* Check, if the jiffies need an update */
436
	tick_do_update_jiffies64(now);
473
	if (tick_do_timer_cpu == cpu)
474
		tick_do_update_jiffies64(now);
437
475
438
	/*
476
	/*
439
	 * Do not call, when we are not in irq context and have
477
	 * Do not call, when we are not in irq context and have
(-)a/kernel/timer.c (+2 lines)
Lines 1903-1908 unregister_time_interpolator(struct time_interpolator *ti) Link Here
1903
		prev = &curr->next;
1903
		prev = &curr->next;
1904
	}
1904
	}
1905
1905
1906
	clocksource_resume();
1907
1906
	write_seqlock_irqsave(&xtime_lock, flags);
1908
	write_seqlock_irqsave(&xtime_lock, flags);
1907
	if (ti == time_interpolator) {
1909
	if (ti == time_interpolator) {
1908
		/* we lost the best time-interpolator: */
1910
		/* we lost the best time-interpolator: */
(-)a/lib/zlib_inflate/inflate.c (-3 / +5 lines)
Lines 743-754 int zlib_inflate(z_streamp strm, int flush) Link Here
743
743
744
    strm->data_type = state->bits + (state->last ? 64 : 0) +
744
    strm->data_type = state->bits + (state->last ? 64 : 0) +
745
                      (state->mode == TYPE ? 128 : 0);
745
                      (state->mode == TYPE ? 128 : 0);
746
    if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
747
        ret = Z_BUF_ERROR;
748
746
749
    if (flush == Z_PACKET_FLUSH && ret == Z_OK &&
747
    if (flush == Z_PACKET_FLUSH && ret == Z_OK &&
750
            (strm->avail_out != 0 || strm->avail_in == 0))
748
            strm->avail_out != 0 && strm->avail_in == 0)
751
		return zlib_inflateSyncPacket(strm);
749
		return zlib_inflateSyncPacket(strm);
750
751
    if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
752
        ret = Z_BUF_ERROR;
753
752
    return ret;
754
    return ret;
753
}
755
}
754
756
(-)a/mm/hugetlb.c (+2 lines)
Lines 140-145 static struct page *alloc_huge_page(struct vm_area_struct *vma, Link Here
140
	return page;
140
	return page;
141
141
142
fail:
142
fail:
143
	if (vma->vm_flags & VM_MAYSHARE)
144
		resv_huge_pages++;
143
	spin_unlock(&hugetlb_lock);
145
	spin_unlock(&hugetlb_lock);
144
	return NULL;
146
	return NULL;
145
}
147
}
(-)a/mm/oom_kill.c (-4 / +6 lines)
Lines 397-402 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) Link Here
397
	struct task_struct *p;
397
	struct task_struct *p;
398
	unsigned long points = 0;
398
	unsigned long points = 0;
399
	unsigned long freed = 0;
399
	unsigned long freed = 0;
400
	int constraint;
400
401
401
	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
402
	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
402
	if (freed > 0)
403
	if (freed > 0)
Lines 411-424 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) Link Here
411
		show_mem();
412
		show_mem();
412
	}
413
	}
413
414
414
	cpuset_lock();
415
	read_lock(&tasklist_lock);
416
417
	/*
415
	/*
418
	 * Check if there were limitations on the allocation (only relevant for
416
	 * Check if there were limitations on the allocation (only relevant for
419
	 * NUMA) that may require different handling.
417
	 * NUMA) that may require different handling.
420
	 */
418
	 */
421
	switch (constrained_alloc(zonelist, gfp_mask)) {
419
	constraint = constrained_alloc(zonelist, gfp_mask);
420
	cpuset_lock();
421
	read_lock(&tasklist_lock);
422
423
	switch (constraint) {
422
	case CONSTRAINT_MEMORY_POLICY:
424
	case CONSTRAINT_MEMORY_POLICY:
423
		oom_kill_process(current, points,
425
		oom_kill_process(current, points,
424
				"No available memory (MPOL_BIND)");
426
				"No available memory (MPOL_BIND)");
(-)a/mm/slob.c (-12 / +3 lines)
Lines 150-164 static void slob_free(void *block, int size) Link Here
150
	spin_unlock_irqrestore(&slob_lock, flags);
150
	spin_unlock_irqrestore(&slob_lock, flags);
151
}
151
}
152
152
153
static int FASTCALL(find_order(int size));
154
static int fastcall find_order(int size)
155
{
156
	int order = 0;
157
	for ( ; size > 4096 ; size >>=1)
158
		order++;
159
	return order;
160
}
161
162
void *__kmalloc(size_t size, gfp_t gfp)
153
void *__kmalloc(size_t size, gfp_t gfp)
163
{
154
{
164
	slob_t *m;
155
	slob_t *m;
Lines 174-180 void *__kmalloc(size_t size, gfp_t gfp) Link Here
174
	if (!bb)
165
	if (!bb)
175
		return 0;
166
		return 0;
176
167
177
	bb->order = find_order(size);
168
	bb->order = get_order(size);
178
	bb->pages = (void *)__get_free_pages(gfp, bb->order);
169
	bb->pages = (void *)__get_free_pages(gfp, bb->order);
179
170
180
	if (bb->pages) {
171
	if (bb->pages) {
Lines 284-290 void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags) Link Here
284
	if (c->size < PAGE_SIZE)
275
	if (c->size < PAGE_SIZE)
285
		b = slob_alloc(c->size, flags, c->align);
276
		b = slob_alloc(c->size, flags, c->align);
286
	else
277
	else
287
		b = (void *)__get_free_pages(flags, find_order(c->size));
278
		b = (void *)__get_free_pages(flags, get_order(c->size));
288
279
289
	if (c->ctor)
280
	if (c->ctor)
290
		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
281
		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
Lines 311-317 void kmem_cache_free(struct kmem_cache *c, void *b) Link Here
311
	if (c->size < PAGE_SIZE)
302
	if (c->size < PAGE_SIZE)
312
		slob_free(b, c->size);
303
		slob_free(b, c->size);
313
	else
304
	else
314
		free_pages((unsigned long)b, find_order(c->size));
305
		free_pages((unsigned long)b, get_order(c->size));
315
}
306
}
316
EXPORT_SYMBOL(kmem_cache_free);
307
EXPORT_SYMBOL(kmem_cache_free);
317
308
(-)a/net/ipv4/netfilter/ip_conntrack_core.c (-5 / +5 lines)
Lines 302-308 destroy_conntrack(struct nf_conntrack *nfct) Link Here
302
{
302
{
303
	struct ip_conntrack *ct = (struct ip_conntrack *)nfct;
303
	struct ip_conntrack *ct = (struct ip_conntrack *)nfct;
304
	struct ip_conntrack_protocol *proto;
304
	struct ip_conntrack_protocol *proto;
305
	struct ip_conntrack_helper *helper;
306
	typeof(ip_conntrack_destroyed) destroyed;
305
	typeof(ip_conntrack_destroyed) destroyed;
307
306
308
	DEBUGP("destroy_conntrack(%p)\n", ct);
307
	DEBUGP("destroy_conntrack(%p)\n", ct);
Lines 312-321 destroy_conntrack(struct nf_conntrack *nfct) Link Here
312
	ip_conntrack_event(IPCT_DESTROY, ct);
311
	ip_conntrack_event(IPCT_DESTROY, ct);
313
	set_bit(IPS_DYING_BIT, &ct->status);
312
	set_bit(IPS_DYING_BIT, &ct->status);
314
313
315
	helper = ct->helper;
316
	if (helper && helper->destroy)
317
		helper->destroy(ct);
318
319
	/* To make sure we don't get any weird locking issues here:
314
	/* To make sure we don't get any weird locking issues here:
320
	 * destroy_conntrack() MUST NOT be called with a write lock
315
	 * destroy_conntrack() MUST NOT be called with a write lock
321
	 * to ip_conntrack_lock!!! -HW */
316
	 * to ip_conntrack_lock!!! -HW */
Lines 356-361 destroy_conntrack(struct nf_conntrack *nfct) Link Here
356
static void death_by_timeout(unsigned long ul_conntrack)
351
static void death_by_timeout(unsigned long ul_conntrack)
357
{
352
{
358
	struct ip_conntrack *ct = (void *)ul_conntrack;
353
	struct ip_conntrack *ct = (void *)ul_conntrack;
354
	struct ip_conntrack_helper *helper;
355
356
	helper = ct->helper;
357
	if (helper && helper->destroy)
358
		helper->destroy(ct);
359
359
360
	write_lock_bh(&ip_conntrack_lock);
360
	write_lock_bh(&ip_conntrack_lock);
361
	/* Inside lock so preempt is disabled on module removal path.
361
	/* Inside lock so preempt is disabled on module removal path.
(-)a/net/ipv4/netfilter/ip_nat_proto_gre.c (-12 / +8 lines)
Lines 70-75 gre_unique_tuple(struct ip_conntrack_tuple *tuple, Link Here
70
	__be16 *keyptr;
70
	__be16 *keyptr;
71
	unsigned int min, i, range_size;
71
	unsigned int min, i, range_size;
72
72
73
	/* If there is no master conntrack we are not PPTP,
74
	   do not change tuples */
75
	if (!conntrack->master)
76
		return 0;
77
		
73
	if (maniptype == IP_NAT_MANIP_SRC)
78
	if (maniptype == IP_NAT_MANIP_SRC)
74
		keyptr = &tuple->src.u.gre.key;
79
		keyptr = &tuple->src.u.gre.key;
75
	else
80
	else
Lines 122-139 gre_manip_pkt(struct sk_buff **pskb, Link Here
122
	if (maniptype == IP_NAT_MANIP_DST) {
127
	if (maniptype == IP_NAT_MANIP_DST) {
123
		/* key manipulation is always dest */
128
		/* key manipulation is always dest */
124
		switch (greh->version) {
129
		switch (greh->version) {
125
		case 0:
130
		case GRE_VERSION_1701:
126
			if (!greh->key) {
131
			/* We do not currently NAT any GREv0 packets.
127
				DEBUGP("can't nat GRE w/o key\n");
132
			 * Try to behave like "ip_nat_proto_unknown" */
128
				break;
129
			}
130
			if (greh->csum) {
131
				/* FIXME: Never tested this code... */
132
				nf_proto_csum_replace4(gre_csum(greh), *pskb,
133
							*(gre_key(greh)),
134
							tuple->dst.u.gre.key, 0);
135
			}
136
			*(gre_key(greh)) = tuple->dst.u.gre.key;
137
			break;
133
			break;
138
		case GRE_VERSION_PPTP:
134
		case GRE_VERSION_PPTP:
139
			DEBUGP("call_id -> 0x%04x\n",
135
			DEBUGP("call_id -> 0x%04x\n",
(-)a/net/ipv4/netfilter/nf_nat_proto_gre.c (-12 / +8 lines)
Lines 72-77 gre_unique_tuple(struct nf_conntrack_tuple *tuple, Link Here
72
	__be16 *keyptr;
72
	__be16 *keyptr;
73
	unsigned int min, i, range_size;
73
	unsigned int min, i, range_size;
74
74
75
	/* If there is no master conntrack we are not PPTP,
76
	   do not change tuples */
77
	if (!conntrack->master)
78
		return 0;
79
		
75
	if (maniptype == IP_NAT_MANIP_SRC)
80
	if (maniptype == IP_NAT_MANIP_SRC)
76
		keyptr = &tuple->src.u.gre.key;
81
		keyptr = &tuple->src.u.gre.key;
77
	else
82
	else
Lines 122-139 gre_manip_pkt(struct sk_buff **pskb, unsigned int iphdroff, Link Here
122
	if (maniptype != IP_NAT_MANIP_DST)
127
	if (maniptype != IP_NAT_MANIP_DST)
123
		return 1;
128
		return 1;
124
	switch (greh->version) {
129
	switch (greh->version) {
125
	case 0:
130
	case GRE_VERSION_1701:
126
		if (!greh->key) {
131
		/* We do not currently NAT any GREv0 packets.
127
			DEBUGP("can't nat GRE w/o key\n");
132
		 * Try to behave like "nf_nat_proto_unknown" */
128
			break;
129
		}
130
		if (greh->csum) {
131
			/* FIXME: Never tested this code... */
132
			nf_proto_csum_replace4(gre_csum(greh), *pskb,
133
					       *(gre_key(greh)),
134
					       tuple->dst.u.gre.key, 0);
135
		}
136
		*(gre_key(greh)) = tuple->dst.u.gre.key;
137
		break;
133
		break;
138
	case GRE_VERSION_PPTP:
134
	case GRE_VERSION_PPTP:
139
		DEBUGP("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
135
		DEBUGP("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
(-)a/net/ipv4/tcp.c (-2 / +1 lines)
Lines 1759-1766 int tcp_disconnect(struct sock *sk, int flags) Link Here
1759
	tcp_clear_retrans(tp);
1759
	tcp_clear_retrans(tp);
1760
	inet_csk_delack_init(sk);
1760
	inet_csk_delack_init(sk);
1761
	sk->sk_send_head = NULL;
1761
	sk->sk_send_head = NULL;
1762
	tp->rx_opt.saw_tstamp = 0;
1762
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
1763
	tcp_sack_reset(&tp->rx_opt);
1764
	__sk_dst_reset(sk);
1763
	__sk_dst_reset(sk);
1765
1764
1766
	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1765
	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
(-)a/net/ipv6/addrconf.c (-2 / +8 lines)
Lines 2281-2288 static int addrconf_notify(struct notifier_block *this, unsigned long event, Link Here
2281
		break;
2281
		break;
2282
2282
2283
	case NETDEV_CHANGENAME:
2283
	case NETDEV_CHANGENAME:
2284
#ifdef CONFIG_SYSCTL
2285
		if (idev) {
2284
		if (idev) {
2285
			snmp6_unregister_dev(idev);
2286
#ifdef CONFIG_SYSCTL
2286
			addrconf_sysctl_unregister(&idev->cnf);
2287
			addrconf_sysctl_unregister(&idev->cnf);
2287
			neigh_sysctl_unregister(idev->nd_parms);
2288
			neigh_sysctl_unregister(idev->nd_parms);
2288
			neigh_sysctl_register(dev, idev->nd_parms,
2289
			neigh_sysctl_register(dev, idev->nd_parms,
Lines 2290-2297 static int addrconf_notify(struct notifier_block *this, unsigned long event, Link Here
2290
					      &ndisc_ifinfo_sysctl_change,
2291
					      &ndisc_ifinfo_sysctl_change,
2291
					      NULL);
2292
					      NULL);
2292
			addrconf_sysctl_register(idev, &idev->cnf);
2293
			addrconf_sysctl_register(idev, &idev->cnf);
2293
		}
2294
#endif
2294
#endif
2295
			snmp6_register_dev(idev);
2296
		}
2295
		break;
2297
		break;
2296
	};
2298
	};
2297
2299
Lines 4060-4065 int __init addrconf_init(void) Link Here
4060
		return err;
4062
		return err;
4061
4063
4062
	ip6_null_entry.rt6i_idev = in6_dev_get(&loopback_dev);
4064
	ip6_null_entry.rt6i_idev = in6_dev_get(&loopback_dev);
4065
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
4066
	ip6_prohibit_entry.rt6i_idev = in6_dev_get(&loopback_dev);
4067
	ip6_blk_hole_entry.rt6i_idev = in6_dev_get(&loopback_dev);
4068
#endif
4063
4069
4064
	register_netdevice_notifier(&ipv6_dev_notf);
4070
	register_netdevice_notifier(&ipv6_dev_notf);
4065
4071
(-)a/net/ipv6/exthdrs.c (-4 / +12 lines)
Lines 652-657 EXPORT_SYMBOL_GPL(ipv6_invert_rthdr); Link Here
652
  Hop-by-hop options.
652
  Hop-by-hop options.
653
 **********************************/
653
 **********************************/
654
654
655
/*
656
 * Note: we cannot rely on skb->dst before we assign it in ip6_route_input().
657
 */
658
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
659
{
660
	return skb->dst ? ip6_dst_idev(skb->dst) : __in6_dev_get(skb->dev);
661
}
662
655
/* Router Alert as of RFC 2711 */
663
/* Router Alert as of RFC 2711 */
656
664
657
static int ipv6_hop_ra(struct sk_buff **skbp, int optoff)
665
static int ipv6_hop_ra(struct sk_buff **skbp, int optoff)
Lines 678-702 static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff) Link Here
678
	if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
686
	if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
679
		LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
687
		LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
680
			       skb->nh.raw[optoff+1]);
688
			       skb->nh.raw[optoff+1]);
681
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
689
		IP6_INC_STATS_BH(ipv6_skb_idev(skb),
682
				 IPSTATS_MIB_INHDRERRORS);
690
				 IPSTATS_MIB_INHDRERRORS);
683
		goto drop;
691
		goto drop;
684
	}
692
	}
685
693
686
	pkt_len = ntohl(*(__be32*)(skb->nh.raw+optoff+2));
694
	pkt_len = ntohl(*(__be32*)(skb->nh.raw+optoff+2));
687
	if (pkt_len <= IPV6_MAXPLEN) {
695
	if (pkt_len <= IPV6_MAXPLEN) {
688
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
696
		IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS);
689
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
697
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
690
		return 0;
698
		return 0;
691
	}
699
	}
692
	if (skb->nh.ipv6h->payload_len) {
700
	if (skb->nh.ipv6h->payload_len) {
693
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
701
		IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS);
694
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
702
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
695
		return 0;
703
		return 0;
696
	}
704
	}
697
705
698
	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
706
	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
699
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INTRUNCATEDPKTS);
707
		IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INTRUNCATEDPKTS);
700
		goto drop;
708
		goto drop;
701
	}
709
	}
702
710
(-)a/net/ipv6/ip6_input.c (-1 / +1 lines)
Lines 235-241 int ip6_mc_input(struct sk_buff *skb) Link Here
235
	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS);
235
	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS);
236
236
237
	hdr = skb->nh.ipv6h;
237
	hdr = skb->nh.ipv6h;
238
	deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) ||
238
	deliver = unlikely(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) ||
239
	    ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
239
	    ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
240
240
241
	/*
241
	/*
(-)a/net/ipv6/ip6_output.c (-3 / +10 lines)
Lines 449-458 int ip6_forward(struct sk_buff *skb) Link Here
449
		 */
449
		 */
450
		if (xrlim_allow(dst, 1*HZ))
450
		if (xrlim_allow(dst, 1*HZ))
451
			ndisc_send_redirect(skb, n, target);
451
			ndisc_send_redirect(skb, n, target);
452
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
452
	} else {
453
						|IPV6_ADDR_LINKLOCAL)) {
453
		int addrtype = ipv6_addr_type(&hdr->saddr);
454
454
		/* This check is security critical. */
455
		/* This check is security critical. */
455
		goto error;
456
		if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK))
457
			goto error;
458
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
459
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
460
				ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
461
			goto error;
462
		}
456
	}
463
	}
457
464
458
	if (skb->len > dst_mtu(dst)) {
465
	if (skb->len > dst_mtu(dst)) {
(-)a/net/ipv6/proc.c (+1 lines)
Lines 236-241 int snmp6_unregister_dev(struct inet6_dev *idev) Link Here
236
		return -EINVAL;
236
		return -EINVAL;
237
	remove_proc_entry(idev->stats.proc_dir_entry->name,
237
	remove_proc_entry(idev->stats.proc_dir_entry->name,
238
			  proc_net_devsnmp6);
238
			  proc_net_devsnmp6);
239
	idev->stats.proc_dir_entry = NULL;
239
	return 0;
240
	return 0;
240
}
241
}
241
242
(-)a/net/ipv6/xfrm6_tunnel.c (-1 / +1 lines)
Lines 261-267 static int xfrm6_tunnel_rcv(struct sk_buff *skb) Link Here
261
	__be32 spi;
261
	__be32 spi;
262
262
263
	spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
263
	spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
264
	return xfrm6_rcv_spi(skb, spi);
264
	return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
265
}
265
}
266
266
267
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
267
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
(-)a/net/netfilter/nf_conntrack_core.c (-4 / +4 lines)
Lines 315-321 static void Link Here
315
destroy_conntrack(struct nf_conntrack *nfct)
315
destroy_conntrack(struct nf_conntrack *nfct)
316
{
316
{
317
	struct nf_conn *ct = (struct nf_conn *)nfct;
317
	struct nf_conn *ct = (struct nf_conn *)nfct;
318
	struct nf_conn_help *help = nfct_help(ct);
319
	struct nf_conntrack_l3proto *l3proto;
318
	struct nf_conntrack_l3proto *l3proto;
320
	struct nf_conntrack_l4proto *l4proto;
319
	struct nf_conntrack_l4proto *l4proto;
321
	typeof(nf_conntrack_destroyed) destroyed;
320
	typeof(nf_conntrack_destroyed) destroyed;
Lines 327-335 destroy_conntrack(struct nf_conntrack *nfct) Link Here
327
	nf_conntrack_event(IPCT_DESTROY, ct);
326
	nf_conntrack_event(IPCT_DESTROY, ct);
328
	set_bit(IPS_DYING_BIT, &ct->status);
327
	set_bit(IPS_DYING_BIT, &ct->status);
329
328
330
	if (help && help->helper && help->helper->destroy)
331
		help->helper->destroy(ct);
332
333
	/* To make sure we don't get any weird locking issues here:
329
	/* To make sure we don't get any weird locking issues here:
334
	 * destroy_conntrack() MUST NOT be called with a write lock
330
	 * destroy_conntrack() MUST NOT be called with a write lock
335
	 * to nf_conntrack_lock!!! -HW */
331
	 * to nf_conntrack_lock!!! -HW */
Lines 375-380 destroy_conntrack(struct nf_conntrack *nfct) Link Here
375
static void death_by_timeout(unsigned long ul_conntrack)
371
static void death_by_timeout(unsigned long ul_conntrack)
376
{
372
{
377
	struct nf_conn *ct = (void *)ul_conntrack;
373
	struct nf_conn *ct = (void *)ul_conntrack;
374
	struct nf_conn_help *help = nfct_help(ct);
375
376
	if (help && help->helper && help->helper->destroy)
377
		help->helper->destroy(ct);
378
378
379
	write_lock_bh(&nf_conntrack_lock);
379
	write_lock_bh(&nf_conntrack_lock);
380
	/* Inside lock so preempt is disabled on module removal path.
380
	/* Inside lock so preempt is disabled on module removal path.
(-)a/net/sched/sch_prio.c (-1 / +1 lines)
Lines 74-80 prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) Link Here
74
		band = res.classid;
74
		band = res.classid;
75
	}
75
	}
76
	band = TC_H_MIN(band) - 1;
76
	band = TC_H_MIN(band) - 1;
77
	if (band > q->bands)
77
	if (band >= q->bands)
78
		return q->queues[q->prio2band[0]];
78
		return q->queues[q->prio2band[0]];
79
79
80
	return q->queues[band];
80
	return q->queues[band];
(-)a/net/sctp/socket.c (-39 / +72 lines)
Lines 3847-3853 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, Link Here
3847
		memcpy(&temp, &from->ipaddr, sizeof(temp));
3847
		memcpy(&temp, &from->ipaddr, sizeof(temp));
3848
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
3848
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
3849
		addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
3849
		addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
3850
		if(space_left < addrlen)
3850
		if (space_left < addrlen)
3851
			return -ENOMEM;
3851
			return -ENOMEM;
3852
		if (copy_to_user(to, &temp, addrlen))
3852
		if (copy_to_user(to, &temp, addrlen))
3853
			return -EFAULT;
3853
			return -EFAULT;
Lines 3936-3943 done: Link Here
3936
/* Helper function that copies local addresses to user and returns the number
3936
/* Helper function that copies local addresses to user and returns the number
3937
 * of addresses copied.
3937
 * of addresses copied.
3938
 */
3938
 */
3939
static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_addrs,
3939
static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
3940
					void __user *to)
3940
					int max_addrs, void *to,
3941
					int *bytes_copied)
3941
{
3942
{
3942
	struct list_head *pos, *next;
3943
	struct list_head *pos, *next;
3943
	struct sctp_sockaddr_entry *addr;
3944
	struct sctp_sockaddr_entry *addr;
Lines 3954-3963 static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add Link Here
3954
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
3955
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
3955
								&temp);
3956
								&temp);
3956
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
3957
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
3957
		if (copy_to_user(to, &temp, addrlen))
3958
		memcpy(to, &temp, addrlen);
3958
			return -EFAULT;
3959
3959
3960
		to += addrlen;
3960
		to += addrlen;
3961
		*bytes_copied += addrlen;
3961
		cnt ++;
3962
		cnt ++;
3962
		if (cnt >= max_addrs) break;
3963
		if (cnt >= max_addrs) break;
3963
	}
3964
	}
Lines 3965-3972 static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add Link Here
3965
	return cnt;
3966
	return cnt;
3966
}
3967
}
3967
3968
3968
static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
3969
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
3969
				    void __user **to, size_t space_left)
3970
			    size_t space_left, int *bytes_copied)
3970
{
3971
{
3971
	struct list_head *pos, *next;
3972
	struct list_head *pos, *next;
3972
	struct sctp_sockaddr_entry *addr;
3973
	struct sctp_sockaddr_entry *addr;
Lines 3983-3996 static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, Link Here
3983
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
3984
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
3984
								&temp);
3985
								&temp);
3985
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
3986
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
3986
		if(space_left<addrlen)
3987
		if (space_left < addrlen)
3987
			return -ENOMEM;
3988
			return -ENOMEM;
3988
		if (copy_to_user(*to, &temp, addrlen))
3989
		memcpy(to, &temp, addrlen);
3989
			return -EFAULT;
3990
3990
3991
		*to += addrlen;
3991
		to += addrlen;
3992
		cnt ++;
3992
		cnt ++;
3993
		space_left -= addrlen;
3993
		space_left -= addrlen;
3994
		bytes_copied += addrlen;
3994
	}
3995
	}
3995
3996
3996
	return cnt;
3997
	return cnt;
Lines 4014-4019 static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, Link Here
4014
	int addrlen;
4015
	int addrlen;
4015
	rwlock_t *addr_lock;
4016
	rwlock_t *addr_lock;
4016
	int err = 0;
4017
	int err = 0;
4018
	void *addrs;
4019
	void *buf;
4020
	int bytes_copied = 0;
4017
4021
4018
	if (len != sizeof(struct sctp_getaddrs_old))
4022
	if (len != sizeof(struct sctp_getaddrs_old))
4019
		return -EINVAL;
4023
		return -EINVAL;
Lines 4041-4046 static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, Link Here
4041
4045
4042
	to = getaddrs.addrs;
4046
	to = getaddrs.addrs;
4043
4047
4048
	/* Allocate space for a local instance of packed array to hold all
4049
	 * the data.  We store addresses here first and then put write them
4050
	 * to the user in one shot.
4051
	 */
4052
	addrs = kmalloc(sizeof(union sctp_addr) * getaddrs.addr_num,
4053
			GFP_KERNEL);
4054
	if (!addrs)
4055
		return -ENOMEM;
4056
4044
	sctp_read_lock(addr_lock);
4057
	sctp_read_lock(addr_lock);
4045
4058
4046
	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
4059
	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
Lines 4050-4087 static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, Link Here
4050
		addr = list_entry(bp->address_list.next,
4063
		addr = list_entry(bp->address_list.next,
4051
				  struct sctp_sockaddr_entry, list);
4064
				  struct sctp_sockaddr_entry, list);
4052
		if (sctp_is_any(&addr->a)) {
4065
		if (sctp_is_any(&addr->a)) {
4053
			cnt = sctp_copy_laddrs_to_user_old(sk, bp->port,
4066
			cnt = sctp_copy_laddrs_old(sk, bp->port,
4054
							   getaddrs.addr_num,
4067
						   getaddrs.addr_num,
4055
							   to);
4068
						   addrs, &bytes_copied);
4056
			if (cnt < 0) {
4057
				err = cnt;
4058
				goto unlock;
4059
			}
4060
			goto copy_getaddrs;
4069
			goto copy_getaddrs;
4061
		}
4070
		}
4062
	}
4071
	}
4063
4072
4073
	buf = addrs;
4064
	list_for_each(pos, &bp->address_list) {
4074
	list_for_each(pos, &bp->address_list) {
4065
		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
4075
		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
4066
		memcpy(&temp, &addr->a, sizeof(temp));
4076
		memcpy(&temp, &addr->a, sizeof(temp));
4067
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4077
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4068
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4078
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4069
		if (copy_to_user(to, &temp, addrlen)) {
4079
		memcpy(buf, &temp, addrlen);
4070
			err = -EFAULT;
4080
		buf += addrlen;
4071
			goto unlock;
4081
		bytes_copied += addrlen;
4072
		}
4073
		to += addrlen;
4074
		cnt ++;
4082
		cnt ++;
4075
		if (cnt >= getaddrs.addr_num) break;
4083
		if (cnt >= getaddrs.addr_num) break;
4076
	}
4084
	}
4077
4085
4078
copy_getaddrs:
4086
copy_getaddrs:
4087
	sctp_read_unlock(addr_lock);
4088
4089
	/* copy the entire address list into the user provided space */
4090
	if (copy_to_user(to, addrs, bytes_copied)) {
4091
		err = -EFAULT;
4092
		goto error;
4093
	}
4094
4095
	/* copy the leading structure back to user */
4079
	getaddrs.addr_num = cnt;
4096
	getaddrs.addr_num = cnt;
4080
	if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
4097
	if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
4081
		err = -EFAULT;
4098
		err = -EFAULT;
4082
4099
4083
unlock:
4100
error:
4084
	sctp_read_unlock(addr_lock);
4101
	kfree(addrs);
4085
	return err;
4102
	return err;
4086
}
4103
}
4087
4104
Lines 4101-4107 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, Link Here
4101
	rwlock_t *addr_lock;
4118
	rwlock_t *addr_lock;
4102
	int err = 0;
4119
	int err = 0;
4103
	size_t space_left;
4120
	size_t space_left;
4104
	int bytes_copied;
4121
	int bytes_copied = 0;
4122
	void *addrs;
4123
	void *buf;
4105
4124
4106
	if (len <= sizeof(struct sctp_getaddrs))
4125
	if (len <= sizeof(struct sctp_getaddrs))
4107
		return -EINVAL;
4126
		return -EINVAL;
Lines 4129-4134 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, Link Here
4129
	to = optval + offsetof(struct sctp_getaddrs,addrs);
4148
	to = optval + offsetof(struct sctp_getaddrs,addrs);
4130
	space_left = len - sizeof(struct sctp_getaddrs) -
4149
	space_left = len - sizeof(struct sctp_getaddrs) -
4131
			 offsetof(struct sctp_getaddrs,addrs);
4150
			 offsetof(struct sctp_getaddrs,addrs);
4151
	addrs = kmalloc(space_left, GFP_KERNEL);
4152
	if (!addrs)
4153
		return -ENOMEM;
4132
4154
4133
	sctp_read_lock(addr_lock);
4155
	sctp_read_lock(addr_lock);
4134
4156
Lines 4139-4179 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, Link Here
4139
		addr = list_entry(bp->address_list.next,
4161
		addr = list_entry(bp->address_list.next,
4140
				  struct sctp_sockaddr_entry, list);
4162
				  struct sctp_sockaddr_entry, list);
4141
		if (sctp_is_any(&addr->a)) {
4163
		if (sctp_is_any(&addr->a)) {
4142
			cnt = sctp_copy_laddrs_to_user(sk, bp->port,
4164
			cnt = sctp_copy_laddrs(sk, bp->port, addrs,
4143
						       &to, space_left);
4165
						space_left, &bytes_copied);
4144
			if (cnt < 0) {
4166
			if (cnt < 0) {
4145
				err = cnt;
4167
				err = cnt;
4146
				goto unlock;
4168
				goto error;
4147
			}
4169
			}
4148
			goto copy_getaddrs;
4170
			goto copy_getaddrs;
4149
		}
4171
		}
4150
	}
4172
	}
4151
4173
4174
	buf = addrs;
4152
	list_for_each(pos, &bp->address_list) {
4175
	list_for_each(pos, &bp->address_list) {
4153
		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
4176
		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
4154
		memcpy(&temp, &addr->a, sizeof(temp));
4177
		memcpy(&temp, &addr->a, sizeof(temp));
4155
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4178
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4156
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4179
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4157
		if(space_left < addrlen)
4180
		if (space_left < addrlen) {
4158
			return -ENOMEM; /*fixme: right error?*/
4181
			err =  -ENOMEM; /*fixme: right error?*/
4159
		if (copy_to_user(to, &temp, addrlen)) {
4182
			goto error;
4160
			err = -EFAULT;
4161
			goto unlock;
4162
		}
4183
		}
4163
		to += addrlen;
4184
		memcpy(buf, &temp, addrlen);
4185
		buf += addrlen;
4186
		bytes_copied += addrlen;
4164
		cnt ++;
4187
		cnt ++;
4165
		space_left -= addrlen;
4188
		space_left -= addrlen;
4166
	}
4189
	}
4167
4190
4168
copy_getaddrs:
4191
copy_getaddrs:
4192
	sctp_read_unlock(addr_lock);
4193
4194
	if (copy_to_user(to, addrs, bytes_copied)) {
4195
		err = -EFAULT;
4196
		goto error;
4197
	}
4169
	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
4198
	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
4170
		return -EFAULT;
4199
		return -EFAULT;
4171
	bytes_copied = ((char __user *)to) - optval;
4172
	if (put_user(bytes_copied, optlen))
4200
	if (put_user(bytes_copied, optlen))
4173
		return -EFAULT;
4201
		return -EFAULT;
4174
4202
4175
unlock:
4203
error:
4176
	sctp_read_unlock(addr_lock);
4204
	kfree(addrs);
4177
	return err;
4205
	return err;
4178
}
4206
}
4179
4207
Lines 4961-4967 int sctp_inet_listen(struct socket *sock, int backlog) Link Here
4961
	/* Allocate HMAC for generating cookie. */
4989
	/* Allocate HMAC for generating cookie. */
4962
	if (sctp_hmac_alg) {
4990
	if (sctp_hmac_alg) {
4963
		tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
4991
		tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
4964
		if (!tfm) {
4992
		if (IS_ERR(tfm)) {
4993
			if (net_ratelimit()) {
4994
				printk(KERN_INFO
4995
				       "SCTP: failed to load transform for %s: %ld\n",
4996
					sctp_hmac_alg, PTR_ERR(tfm));
4997
			}
4965
			err = -ENOSYS;
4998
			err = -ENOSYS;
4966
			goto out;
4999
			goto out;
4967
		}
5000
		}
(-)a/net/sunrpc/auth_gss/svcauth_gss.c (-7 / +1 lines)
Lines 1196-1208 svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp) Link Here
1196
	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
1196
	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
1197
				integ_len))
1197
				integ_len))
1198
		BUG();
1198
		BUG();
1199
	if (resbuf->page_len == 0
1199
	if (resbuf->tail[0].iov_base == NULL) {
1200
			&& resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
1201
			< PAGE_SIZE) {
1202
		BUG_ON(resbuf->tail[0].iov_len);
1203
		/* Use head for everything */
1204
		resv = &resbuf->head[0];
1205
	} else if (resbuf->tail[0].iov_base == NULL) {
1206
		if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1200
		if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1207
			goto out_err;
1201
			goto out_err;
1208
		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
1202
		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
(-)a/net/xfrm/xfrm_policy.c (+4 lines)
Lines 782-787 struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete, Link Here
782
	struct hlist_head *chain;
782
	struct hlist_head *chain;
783
	struct hlist_node *entry;
783
	struct hlist_node *entry;
784
784
785
	*err = -ENOENT;
786
	if (xfrm_policy_id2dir(id) != dir)
787
		return NULL;
788
785
	*err = 0;
789
	*err = 0;
786
	write_lock_bh(&xfrm_policy_lock);
790
	write_lock_bh(&xfrm_policy_lock);
787
	chain = xfrm_policy_byidx + idx_hash(id);
791
	chain = xfrm_policy_byidx + idx_hash(id);
(-)a/scripts/basic/fixdep.c (+2 lines)
Lines 249-254 void parse_config_file(char *map, size_t len) Link Here
249
	found:
249
	found:
250
		if (!memcmp(q - 7, "_MODULE", 7))
250
		if (!memcmp(q - 7, "_MODULE", 7))
251
			q -= 7;
251
			q -= 7;
252
		if( (q-p-7) < 0 )
253
			continue;
252
		use_config(p+7, q-p-7);
254
		use_config(p+7, q-p-7);
253
	}
255
	}
254
}
256
}
(-)a/sound/pci/hda/patch_sigmatel.c (+1 lines)
Lines 1751-1756 static int stac92xx_resume(struct hda_codec *codec) Link Here
1751
1751
1752
	stac92xx_init(codec);
1752
	stac92xx_init(codec);
1753
	stac92xx_set_config_regs(codec);
1753
	stac92xx_set_config_regs(codec);
1754
	snd_hda_resume_ctls(codec, spec->mixer);
1754
	for (i = 0; i < spec->num_mixers; i++)
1755
	for (i = 0; i < spec->num_mixers; i++)
1755
		snd_hda_resume_ctls(codec, spec->mixers[i]);
1756
		snd_hda_resume_ctls(codec, spec->mixers[i]);
1756
	if (spec->multiout.dig_out_nid)
1757
	if (spec->multiout.dig_out_nid)

Return to bug 184852