(-)a/arch/x86/entry/vdso/vma.c (-47 / +1 lines)
@@ -298,55 +298,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 }
 
 #ifdef CONFIG_X86_64
-/*
- * Put the vdso above the (randomized) stack with another randomized
- * offset.  This way there is no hole in the middle of address space.
- * To save memory make sure it is still in the same PTE as the stack
- * top.  This doesn't give that many random bits.
- *
- * Note that this algorithm is imperfect: the distribution of the vdso
- * start address within a PMD is biased toward the end.
- *
- * Only used for the 64-bit and x32 vdsos.
- */
-static unsigned long vdso_addr(unsigned long start, unsigned len)
-{
-	unsigned long addr, end;
-	unsigned offset;
-
-	/*
-	 * Round up the start address.  It can start out unaligned as a result
-	 * of stack start randomization.
-	 */
-	start = PAGE_ALIGN(start);
-
-	/* Round the lowest possible end address up to a PMD boundary. */
-	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-	if (end >= TASK_SIZE_MAX)
-		end = TASK_SIZE_MAX;
-	end -= len;
-
-	if (end > start) {
-		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
-		addr = start + (offset << PAGE_SHIFT);
-	} else {
-		addr = start;
-	}
-
-	/*
-	 * Forcibly align the final address in case we have a hardware
-	 * issue that requires alignment for performance reasons.
-	 */
-	addr = align_vdso_addr(addr);
-
-	return addr;
-}
-
 static int map_vdso_randomized(const struct vdso_image *image)
 {
-	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
-
-	return map_vdso(image, addr);
+	return map_vdso(image, 0);
 }
 #endif
 
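This hunk drops the stack-adjacent placement entirely: with a requested address of 0, map_vdso() falls through to the normal get_unmapped_area() path, so the vDSO base is drawn from the same randomized mmap region as ordinary shared mappings. For reference, the deleted vdso_addr() searched at most one PMD (2 MiB) above the page-aligned stack top, i.e. at most 512 page-sized slots, roughly 9 bits of entropy. A minimal userspace sketch of that arithmetic, with hypothetical constants standing in for the kernel's (rand() stands in for get_random_int()):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PMD_SIZE	(1UL << 21)		/* 2 MiB */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define TASK_SIZE_MAX	0x7ffffffff000UL	/* 47-bit user space */

/* Userspace re-creation of the deleted vdso_addr() logic. */
static unsigned long vdso_addr_demo(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	start = PAGE_ALIGN(start);
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		/* rand() stands in for get_random_int(); demo only */
		offset = rand() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + ((unsigned long)offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

int main(void)
{
	unsigned long stack_top = 0x7ffd1234567fUL;	/* hypothetical */

	/* (end - start) is always < PMD_SIZE, so at most 512 page slots:
	 * about 9 bits of entropy no matter how random the stack is. */
	printf("vdso at %#lx\n", vdso_addr_demo(stack_top, 4 * PAGE_SIZE));
	return 0;
}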
(-)a/arch/x86/include/asm/elf.h (-6 / +9 lines)
@@ -247,11 +247,11 @@ extern int force_personality32;
 
 /*
  * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
  * space open for things that want to use the area for 32-bit pointers.
  */
 #define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
-						  (DEFAULT_MAP_WINDOW / 3 * 2))
+						  0x100000000UL)
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
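The practical effect: upstream puts the PIE load base at two thirds of the 47-bit map window, which (after the loader page-aligns it) is why a non-randomized PIE binary lands at the familiar 0x555555554000 under gdb; the patch pins the base at exactly 4 GiB instead, moving the executable image well below the mmap and stack regions. A quick userspace check of both constants, assuming the usual 47-bit DEFAULT_MAP_WINDOW of (1UL << 47) - 4096 (a sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	/* assumes the common 47-bit window: (1UL << 47) - 4096 */
	unsigned long window = (1UL << 47) - 4096;

	printf("upstream: %#lx\n", window / 3 * 2);	/* 0x555555554aaa */
	printf("patched:  %#lx\n", 0x100000000UL);	/* exactly 4 GiB */
	return 0;
}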
@@ -333,8 +333,8 @@ extern unsigned long get_sigframe_size(void);
 
 #ifdef CONFIG_X86_32
 
-#define __STACK_RND_MASK(is32bit) (0x7ff)
-#define STACK_RND_MASK (0x7ff)
+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
+#define STACK_RND_MASK ((1UL << mmap_rnd_bits) - 1)
 
 #define ARCH_DLINFO		ARCH_DLINFO_IA32
 
@@ -343,7 +343,11 @@ extern unsigned long get_sigframe_size(void);
 #else /* CONFIG_X86_32 */
 
 /* 1GB for 64bit, 8MB for 32bit */
-#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
+#ifdef CONFIG_COMPAT
+#define __STACK_RND_MASK(is32bit) ((is32bit) ? (1UL << mmap_rnd_compat_bits) - 1 : (1UL << mmap_rnd_bits) - 1)
+#else
+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
+#endif
 #define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())
 
 #define ARCH_DLINFO							\
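STACK_RND_MASK feeds randomize_stack_top(), which masks a random value and shifts it left by PAGE_SHIFT, so the mask width directly sets how many pages of entropy the stack top gets. Replacing the fixed masks with values derived from mmap_rnd_bits (and mmap_rnd_compat_bits for 32-bit tasks under CONFIG_COMPAT) ties stack entropy to the vm.mmap_rnd_bits sysctl instead of hard-coding it. A sketch of the resulting ranges, assuming 4 KiB pages; mmap_rnd_bits=32 is just an example value:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Byte range implied by a STACK_RND_MASK value, mirroring the
 * mask-then-shift done by randomize_stack_top(). */
static unsigned long stack_rnd_range(unsigned long mask)
{
	return (mask + 1) << PAGE_SHIFT;
}

int main(void)
{
	printf("old 32-bit mask 0x7ff:    %#lx bytes\n",
	       stack_rnd_range(0x7ffUL));		/* 8 MiB */
	printf("old 64-bit mask 0x3fffff: %#lx bytes\n",
	       stack_rnd_range(0x3fffffUL));		/* 16 GiB */
	printf("mmap_rnd_bits=32:         %#lx bytes\n",
	       stack_rnd_range((1UL << 32) - 1));	/* 16 TiB */
	return 0;
}

(Note the old 64-bit mask is 22 bits of page offsets, a 16 GiB span; the "1GB for 64bit" comment retained above appears stale.)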
@@ -407,5 +411,4 @@ struct va_alignment {
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
-extern unsigned long align_vdso_addr(unsigned long);
 #endif /* _ASM_X86_ELF_H */
(-)a/arch/x86/kernel/process.c (-1 / +7 lines)
@@ -43,6 +43,8 @@
 #include <asm/io_bitmap.h>
 #include <asm/proto.h>
 #include <asm/frame.h>
+#include <asm/elf.h>
+#include <linux/sizes.h>
 
 #include "process.h"
 
@@ -625,6 +627,7 @@ void speculation_ctrl_update_current(void)
 static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
 {
 	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
+	BUG_ON(cr4 != __read_cr4());
 
 	newval = cr4 ^ mask;
 	if (newval != cr4) {
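The added BUG_ON() is a consistency check: the kernel keeps a per-CPU shadow of CR4 in cpu_tlbstate so it can avoid reading the register, and toggling bits from a stale shadow would silently write back wrong flags. A toy model of the cached-register pattern (plain C; the names are stand-ins for the real per-CPU state and register accessors):

#include <assert.h>
#include <stdio.h>

static unsigned long hw_cr4;		/* stands in for the CR4 register */
static unsigned long cached_cr4;	/* stands in for cpu_tlbstate.cr4 */

static void cr4_toggle_bits_demo(unsigned long mask)
{
	unsigned long newval, cr4 = cached_cr4;

	assert(cr4 == hw_cr4);	/* the added BUG_ON(cr4 != __read_cr4()) */

	newval = cr4 ^ mask;
	if (newval != cr4) {
		cached_cr4 = newval;
		hw_cr4 = newval;	/* __write_cr4(newval) */
	}
}

int main(void)
{
	hw_cr4 = cached_cr4 = 0x3506e0UL;	/* hypothetical CR4 value */
	cr4_toggle_bits_demo(1UL << 2);		/* CR4.TSD */
	printf("cr4 now %#lx\n", cached_cr4);
	return 0;
}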
@@ -937,7 +940,10 @@ unsigned long arch_align_stack(unsigned long sp)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	return randomize_page(mm->brk, 0x02000000);
+	if (mmap_is_ia32())
+		return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
+	else
+		return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
 }
 
 /*
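Two changes here: the 64-bit randomization span grows from the fixed 32 MiB of randomize_page() to SZ_1G, and the trailing "+ PAGE_SIZE" guarantees the randomized brk never coincides with the current one. A userspace sketch of the new arithmetic (rand() stands in for get_random_long(), so the demo has far less range than the kernel):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define SZ_32M		0x02000000UL
#define SZ_1G		0x40000000UL

/* Mirrors the patched arch_randomize_brk(): push brk up by a random
 * amount within the bitness-dependent range, plus one guard page. */
static unsigned long randomize_brk_demo(unsigned long brk, int ia32)
{
	unsigned long range = ia32 ? SZ_32M : SZ_1G;

	return brk + (unsigned long)rand() % range + PAGE_SIZE;
}

int main(void)
{
	unsigned long brk = 0x555555560000UL;	/* hypothetical */

	printf("randomized brk: %#lx\n", randomize_brk_demo(brk, 0));
	return 0;
}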
(-)a/arch/x86/kernel/sys_x86_64.c (-12 / +2 lines)
@@ -52,13 +52,6 @@ static unsigned long get_align_bits(void)
 	return va_align.bits & get_align_mask();
 }
 
-unsigned long align_vdso_addr(unsigned long addr)
-{
-	unsigned long align_mask = get_align_mask();
-	addr = (addr + align_mask) & ~align_mask;
-	return addr | get_align_bits();
-}
-
 static int __init control_va_addr_alignment(char *str)
 {
 	/* guard against enabling this on other CPU families */
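With the vDSO no longer given a hand-computed address, this helper has no remaining callers, so it goes away together with its declaration in elf.h above. For reference, it rounded the address up to the va_align mask and OR'd in per-boot random alignment bits, used on some AMD parts to avoid I-cache aliasing penalties. A tiny standalone sketch of that math, with hypothetical mask/bits values standing in for va_align:

#include <stdio.h>

int main(void)
{
	/* hypothetical stand-ins for va_align.mask and va_align.bits */
	unsigned long align_mask = 0x7fffUL;	/* 32 KiB - 1 */
	unsigned long align_bits = 0x3000UL;	/* per-boot random bits */
	unsigned long addr = 0x7ffd12345678UL;

	addr = (addr + align_mask) & ~align_mask;
	printf("aligned: %#lx\n", addr | align_bits);
	return 0;
}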
@@ -113,10 +106,7 @@ static void find_start_end(unsigned long addr, unsigned long flags,
 	}
 
 	*begin	= get_mmap_base(1);
-	if (in_32bit_syscall())
-		*end = task_size_32bit();
-	else
-		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
+	*end	= get_mmap_base(0);
 }
 
 unsigned long
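get_mmap_base(1) is the bottom-up (legacy) mmap base and get_mmap_base(0) the top-down base; both are per-mm and randomized. Capping *end at get_mmap_base(0) instead of the task-size limit keeps bottom-up searches inside the randomized mmap window rather than letting them run to the end of the address space. A toy model of the patched window selection (struct and names are stand-ins, not kernel API):

#include <stdio.h>

struct mm_demo {
	unsigned long mmap_legacy_base;	/* bottom of the mmap window */
	unsigned long mmap_base;	/* top of the mmap window */
};

/* Toy model of the patched find_start_end(): both search limits now
 * come from the per-mm randomized bases. */
static void find_start_end_demo(const struct mm_demo *mm,
				unsigned long *begin, unsigned long *end)
{
	*begin = mm->mmap_legacy_base;	/* get_mmap_base(1) */
	*end   = mm->mmap_base;		/* get_mmap_base(0) */
}

int main(void)
{
	struct mm_demo mm = { 0x155555000000UL, 0x7f1234560000UL }; /* hypothetical */
	unsigned long begin, end;

	find_start_end_demo(&mm, &begin, &end);
	printf("search window: [%#lx, %#lx)\n", begin, end);
	return 0;
}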
@@ -193,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
-	info.low_limit = PAGE_SIZE;
+	info.low_limit = get_mmap_base(1);
 	info.high_limit = get_mmap_base(0);
 
 	/*
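The same confinement, applied to the top-down allocator: with low_limit raised from PAGE_SIZE to get_mmap_base(1), a search that exhausts the window fails instead of sliding down toward the NULL page. A sketch of the before/after search limits, with hypothetical base values:

#include <stdio.h>

int main(void)
{
	/* hypothetical per-mm bases */
	unsigned long legacy_base = 0x155555000000UL;	/* get_mmap_base(1) */
	unsigned long mmap_base   = 0x7f1234560000UL;	/* get_mmap_base(0) */

	/* arch_get_unmapped_area_topdown() search limits */
	printf("before: [%#lx, %#lx)\n", 4096UL, mmap_base);
	printf("after:  [%#lx, %#lx)\n", legacy_base, mmap_base);
	return 0;
}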
