Patch from Gentoo bug 127095 (against linux-2.6.16-rc6-git4): convert sparc64 to the SPARSEMEM memory model, removing the phys_base/pfn_base mem_map bias.

(-)linux-2.6.16-rc6-git4/arch/sparc64/Kconfig (+6 lines)
@@ -186,6 +186,12 @@
 
 endmenu
 
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+
+config ARCH_SPARSEMEM_DEFAULT
+	def_bool y
+
 source "mm/Kconfig"
 
 config GENERIC_ISA_DMA
(-)linux-2.6.16-rc6-git4/arch/sparc64/kernel/sparc64_ksyms.c (-7 lines)
@@ -95,9 +95,6 @@
 
 extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
 
-extern unsigned long phys_base;
-extern unsigned long pfn_base;
-
 extern unsigned int sys_call_table[];
 
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
@@ -342,11 +339,7 @@
 EXPORT_SYMBOL(__bzero_noasi);
 
 /* Various address conversion macros use this. */
-EXPORT_SYMBOL(phys_base);
-EXPORT_SYMBOL(pfn_base);
 EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
-EXPORT_SYMBOL(page_to_pfn);
-EXPORT_SYMBOL(pfn_to_page);
 
 /* No version information on this, heavily used in inline asm,
  * and will always be 'void __ret_efault(void)'.
(-)linux-2.6.16-rc6-git4/arch/sparc64/mm/init.c (-42 / +102 lines)
@@ -111,11 +111,9 @@
 
 unsigned long *sparc64_valid_addr_bitmap __read_mostly;
 
-/* Ugly, but necessary... -DaveM */
-unsigned long phys_base __read_mostly;
+/* Kernel physical address base and size in bytes.  */
 unsigned long kern_base __read_mostly;
 unsigned long kern_size __read_mostly;
-unsigned long pfn_base __read_mostly;
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -186,8 +184,8 @@
 }
 
 #define PG_dcache_dirty		PG_arch_1
-#define PG_dcache_cpu_shift	24
-#define PG_dcache_cpu_mask	(256 - 1)
+#define PG_dcache_cpu_shift	24UL
+#define PG_dcache_cpu_mask	(256UL - 1UL)
 
 #if NR_CPUS > 256
 #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
@@ -320,16 +318,6 @@
 	}
 }
 
-unsigned long page_to_pfn(struct page *page)
-{
-	return (unsigned long) ((page - mem_map) + pfn_base);
-}
-
-struct page *pfn_to_page(unsigned long pfn)
-{
-	return (mem_map + (pfn - pfn_base));
-}
-
 void show_mem(void)
 {
 	printk("Mem-info:\n");
@@ -1196,9 +1184,78 @@
 
 extern unsigned long cmdline_memory_size;
 
-unsigned long __init bootmem_init(unsigned long *pages_avail)
+/* Find a free area for the bootmem map, avoiding the kernel image
+ * and the initial ramdisk.
+ */
+static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
+					       unsigned long end_pfn)
 {
-	unsigned long bootmap_size, start_pfn, end_pfn;
+	unsigned long avoid_start, avoid_end, bootmap_size;
+	int i;
+
+	bootmap_size = ((end_pfn - start_pfn) + 7) / 8;
+	bootmap_size = ALIGN(bootmap_size, sizeof(long));
+
+	avoid_start = avoid_end = 0;
+#ifdef CONFIG_BLK_DEV_INITRD
+	avoid_start = initrd_start;
+	avoid_end = PAGE_ALIGN(initrd_end);
+#endif
+
+#ifdef CONFIG_DEBUG_BOOTMEM
+	prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n",
+		    kern_base, PAGE_ALIGN(kern_base + kern_size),
+		    avoid_start, avoid_end);
+#endif
+	for (i = 0; i < pavail_ents; i++) {
+		unsigned long start, end;
+
+		start = pavail[i].phys_addr;
+		end = start + pavail[i].reg_size;
+
+		while (start < end) {
+			if (start >= kern_base &&
+			    start < PAGE_ALIGN(kern_base + kern_size)) {
+				start = PAGE_ALIGN(kern_base + kern_size);
+				continue;
+			}
+			if (start >= avoid_start && start < avoid_end) {
+				start = avoid_end;
+				continue;
+			}
+
+			if ((end - start) < bootmap_size)
+				break;
+
+			if (start < kern_base &&
+			    (start + bootmap_size) > kern_base) {
+				start = PAGE_ALIGN(kern_base + kern_size);
+				continue;
+			}
+
+			if (start < avoid_start &&
+			    (start + bootmap_size) > avoid_start) {
+				start = avoid_end;
+				continue;
+			}
+
+			/* OK, it doesn't overlap anything, use it.  */
+#ifdef CONFIG_DEBUG_BOOTMEM
+			prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n",
+				    start >> PAGE_SHIFT, start);
+#endif
+			return start >> PAGE_SHIFT;
+		}
+	}
+
+	prom_printf("Cannot find free area for bootmap, aborting.\n");
+	prom_halt();
+}
+
+static unsigned long __init bootmem_init(unsigned long *pages_avail,
+					 unsigned long phys_base)
+{
+	unsigned long bootmap_size, end_pfn;
 	unsigned long end_of_phys_memory = 0UL;
 	unsigned long bootmap_pfn, bytes_avail, size;
 	int i;
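
The bootmem bitmap sized here needs one bit per page frame between start_pfn and end_pfn, rounded up to whole longs; choose_bootmap_pfn() then walks pavail[] for the first gap of that size that overlaps neither the 4MB-locked kernel image nor the initrd. Below is a standalone arithmetic sketch of the sizing; the 4 GB figure, the local ALIGN() stand-in, and PAGE_SHIFT == 13 are illustrative assumptions, not part of the patch.

#include <stdio.h>

/* Local stand-in for the kernel's ALIGN(): round x up to a multiple of a. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define PAGE_SHIFT	13			/* assumption: sparc64 8K pages */

int main(void)
{
	unsigned long start_pfn = 0;
	unsigned long end_pfn = (4UL << 30) >> PAGE_SHIFT;	/* 4 GB example */
	unsigned long bootmap_size;

	bootmap_size = ((end_pfn - start_pfn) + 7) / 8;		/* one bit per pfn */
	bootmap_size = ALIGN(bootmap_size, sizeof(long));

	/* 524288 pfns -> 65536 bytes (64 KB) of bootmem bitmap */
	printf("%lu pfns need a %lu byte bitmap\n",
	       end_pfn - start_pfn, bootmap_size);
	return 0;
}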
@@ -1236,14 +1293,6 @@
 
 	*pages_avail = bytes_avail >> PAGE_SHIFT;
 
-	/* Start with page aligned address of last symbol in kernel
-	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
-	 * 4MB locked TLB translation.
-	 */
-	start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
-
-	bootmap_pfn = start_pfn;
-
 	end_pfn = end_of_phys_memory >> PAGE_SHIFT;
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1260,23 +1309,22 @@
 		                 	 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
 			       initrd_end, end_of_phys_memory);
 			initrd_start = 0;
-		}
-		if (initrd_start) {
-			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
-			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
-				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
+			initrd_end = 0;
 		}
 	}
 #endif
 	/* Initialize the boot-time allocator. */
 	max_pfn = max_low_pfn = end_pfn;
-	min_low_pfn = pfn_base;
+	min_low_pfn = (phys_base >> PAGE_SHIFT);
+
+	bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);
 
 #ifdef CONFIG_DEBUG_BOOTMEM
 	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
 		    min_low_pfn, bootmap_pfn, max_low_pfn);
 #endif
-	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
+	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
+					 min_low_pfn, end_pfn);
 
 	/* Now register the available physical memory with the
 	 * allocator.
@@ -1324,6 +1372,20 @@
 	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
 	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
+	for (i = 0; i < pavail_ents; i++) {
+		unsigned long start_pfn, end_pfn;
+
+		start_pfn = pavail[i].phys_addr >> PAGE_SHIFT;
+		end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT));
+#ifdef CONFIG_DEBUG_BOOTMEM
+		prom_printf("memory_present(0, %lx, %lx)\n",
+			    start_pfn, end_pfn);
+#endif
+		memory_present(0, start_pfn, end_pfn);
+	}
+
+	sparse_init();
+
 	return end_pfn;
 }
 
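
memory_present() records which 64 MB sections of each pavail[] range actually exist, and sparse_init() then allocates a mem_map for every section so marked. The sketch below is a simplified, self-contained illustration of that bookkeeping; the helper name and the section_present[] bitmap are hypothetical, not the mm/sparse.c internals, and PAGE_SHIFT == 13 is assumed.

#define SECTION_SIZE_BITS	26		/* from asm-sparc64/sparsemem.h */
#define MAX_PHYSMEM_BITS	42
#define PAGE_SHIFT		13		/* assumption: sparc64 8K pages */
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
#define NR_MEM_SECTIONS		(1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS))
#define BITS_PER_LONG_SKETCH	(8 * sizeof(unsigned long))

/* Hypothetical "is this section present?" bitmap, one bit per section. */
static unsigned long section_present[NR_MEM_SECTIONS / BITS_PER_LONG_SKETCH];

static void mark_sections_present(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	/* Align down so a range that starts mid-section still marks it. */
	start_pfn &= ~((1UL << PFN_SECTION_SHIFT) - 1);

	/* One section covers 2^PFN_SECTION_SHIFT pfns (64 MB of 8K pages). */
	for (pfn = start_pfn; pfn < end_pfn; pfn += 1UL << PFN_SECTION_SHIFT) {
		unsigned long sec = pfn >> PFN_SECTION_SHIFT;

		section_present[sec / BITS_PER_LONG_SKETCH] |=
			1UL << (sec % BITS_PER_LONG_SKETCH);
	}
}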
@@ -1448,7 +1510,7 @@
 
 void __init paging_init(void)
 {
-	unsigned long end_pfn, pages_avail, shift;
+	unsigned long end_pfn, pages_avail, shift, phys_base;
 	unsigned long real_end, i;
 
 	/* Find available physical memory... */
@@ -1458,8 +1520,6 @@
 	for (i = 0; i < pavail_ents; i++)
 		phys_base = min(phys_base, pavail[i].phys_addr);
 
-	pfn_base = phys_base >> PAGE_SHIFT;
-
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
@@ -1506,7 +1566,9 @@
 
 	/* Setup bootmem... */
 	pages_avail = 0;
-	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
+	last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);
+
+	max_mapnr = last_valid_pfn;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	kernel_physical_mapping_init();
@@ -1515,18 +1577,17 @@
 	{
 		unsigned long zones_size[MAX_NR_ZONES];
 		unsigned long zholes_size[MAX_NR_ZONES];
-		unsigned long npages;
 		int znum;
 
 		for (znum = 0; znum < MAX_NR_ZONES; znum++)
 			zones_size[znum] = zholes_size[znum] = 0;
 
-		npages = end_pfn - pfn_base;
-		zones_size[ZONE_DMA] = npages;
-		zholes_size[ZONE_DMA] = npages - pages_avail;
+		zones_size[ZONE_DMA] = end_pfn;
+		zholes_size[ZONE_DMA] = end_pfn - pages_avail;
 
 		free_area_init_node(0, &contig_page_data, zones_size,
-				    phys_base >> PAGE_SHIFT, zholes_size);
+				    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+				    zholes_size);
 	}
 
 	device_scan();
@@ -1596,7 +1657,6 @@
 
 	taint_real_pages();
 
-	max_mapnr = last_valid_pfn - pfn_base;
 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
 #ifdef CONFIG_DEBUG_BOOTMEM
(-)linux-2.6.16-rc6-git4/include/asm-sparc64/numnodes.h (+6 lines)
@@ -0,0 +1,6 @@
+#ifndef _SPARC64_NUMNODES_H
+#define _SPARC64_NUMNODES_H
+
+#define NODES_SHIFT	0
+
+#endif /* !(_SPARC64_NUMNODES_H) */
(-)linux-2.6.16-rc6-git4/include/asm-sparc64/page.h (-8 / +1 lines)
@@ -124,17 +124,10 @@
 #define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
 
-/* PFNs are real physical page numbers.  However, mem_map only begins to record
- * per-page information starting at pfn_base.  This is to handle systems where
- * the first physical page in the machine is at some huge physical address,
- * such as 4GB.   This is common on a partitioned E10000, for example.
- */
-extern struct page *pfn_to_page(unsigned long pfn);
-extern unsigned long page_to_pfn(struct page *);
+#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
 
-#define pfn_valid(pfn)		(((pfn)-(pfn_base)) < max_mapnr)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #define virt_to_phys __pa
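
With the pfn_base-based prototypes and pfn_valid() removed, this header keeps only the address-space macros; pfn_to_page() and pfn_valid() are then expected to come from the generic SPARSEMEM definitions. A tiny standalone sketch of the new pfn_to_kaddr() macro follows; the PAGE_OFFSET and PAGE_SHIFT values here are illustrative assumptions, not quoted from the tree.

#define PAGE_SHIFT	13				/* assumption: 8K pages */
#define PAGE_OFFSET	0xFFFFF80000000000UL		/* illustrative value only */
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

static inline void *example_kaddr(void)
{
	/* pfn 0x1000 -> physical 0x2000000 -> virtual PAGE_OFFSET + 0x2000000 */
	return pfn_to_kaddr(0x1000UL);
}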
(-)linux-2.6.16-rc6-git4/include/asm-sparc64/pgtable.h (-3 lines)
@@ -212,9 +212,6 @@
 
 #ifndef __ASSEMBLY__
 
-extern unsigned long phys_base;
-extern unsigned long pfn_base;
-
 extern struct page *mem_map_zero;
 #define ZERO_PAGE(vaddr)	(mem_map_zero)
 
(-)linux-2.6.16-rc6-git4/include/asm-sparc64/sparsemem.h (+12 lines)
@@ -0,0 +1,12 @@
+#ifndef _SPARC64_SPARSEMEM_H
+#define _SPARC64_SPARSEMEM_H
+
+#ifdef __KERNEL__
+
+#define SECTION_SIZE_BITS       26
+#define MAX_PHYSADDR_BITS       42
+#define MAX_PHYSMEM_BITS        42
+
+#endif /* !(__KERNEL__) */
+
+#endif /* !(_SPARC64_SPARSEMEM_H) */
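
These constants set the SPARSEMEM geometry for sparc64: each memory section spans 2^26 bytes (64 MB) and physical addresses are capped at 2^42 bytes (4 TB), so at most 2^(42-26) = 65536 sections exist. A small sketch of that arithmetic, with PAGE_SHIFT == 13 (8K pages) assumed:

#define SECTION_SIZE_BITS	26
#define MAX_PHYSMEM_BITS	42
#define PAGE_SHIFT		13	/* assumption: sparc64 8K pages */

#define SECTION_SIZE		(1UL << SECTION_SIZE_BITS)			/* 0x4000000 = 64 MB */
#define NR_MEM_SECTIONS		(1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS))	/* 65536 sections */
#define PAGES_PER_SECTION	(1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))	/* 8192 pages per section */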
