patch-2.3.99-pre6 linux/arch/sparc64/mm/init.c
Next file: linux/arch/sparc64/prom/Makefile
Previous file: linux/arch/sparc64/mm/fault.c
Back to the patch index
Back to the overall index
- Lines: 346
- Date:
Wed Apr 26 12:13:17 2000
- Orig file:
v2.3.99-pre5/linux/arch/sparc64/mm/init.c
- Orig date:
Sun Mar 19 18:35:30 2000
diff -u --recursive --new-file v2.3.99-pre5/linux/arch/sparc64/mm/init.c linux/arch/sparc64/mm/init.c
@@ -1,4 +1,4 @@
-/* $Id: init.c,v 1.149 2000/03/15 14:42:58 jj Exp $
+/* $Id: init.c,v 1.151 2000/04/26 17:09:32 davem Exp $
* arch/sparc64/mm/init.c
*
* Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
@@ -738,22 +738,17 @@
*/
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset, unsigned long color)
{
- unsigned long paddr = __get_free_pages(GFP_KERNEL, 1);
+ struct page *page = alloc_pages(GFP_KERNEL, 1);
- if (paddr) {
- struct page *page2 = mem_map + MAP_NR(paddr + PAGE_SIZE);
+ if (page) {
unsigned long *to_free;
+ unsigned long paddr;
pte_t *pte;
- /* Set count of second page, so we can free it
- * seperately later on.
- */
- atomic_set(&page2->count, 1);
-
- /* Clear out both pages now. */
+ set_page_count((page + 1), 1);
+ paddr = page_address(page);
memset((char *)paddr, 0, (PAGE_SIZE << 1));
- /* Determine which page we give to this request. */
if (!color) {
pte = (pte_t *) paddr;
to_free = (unsigned long *) (paddr + PAGE_SIZE);
@@ -804,50 +799,42 @@
}
}
-#undef DEBUG_BOOTMEM
-
extern unsigned long cmdline_memory_size;
-unsigned long __init bootmem_init(void)
+unsigned long __init bootmem_init(unsigned long *pages_avail)
{
unsigned long bootmap_size, start_pfn, end_pfn;
unsigned long end_of_phys_memory = 0UL;
- unsigned long bootmap_pfn;
+ unsigned long bootmap_pfn, bytes_avail, size;
int i;
- /* XXX It is a bit ambiguous here, whether we should
- * XXX treat the user specified mem=xxx as total wanted
- * XXX physical memory, or as a limit to the upper
- * XXX physical address we allow. For now it is the
- * XXX latter. -DaveM
- */
-#ifdef DEBUG_BOOTMEM
- prom_printf("bootmem_init: Scan sp_banks, ");
-#endif
+
+ bytes_avail = 0UL;
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
end_of_phys_memory = sp_banks[i].base_addr +
sp_banks[i].num_bytes;
+ bytes_avail += sp_banks[i].num_bytes;
if (cmdline_memory_size) {
- if (end_of_phys_memory > cmdline_memory_size) {
- if (cmdline_memory_size < sp_banks[i].base_addr) {
- end_of_phys_memory =
- sp_banks[i-1].base_addr +
- sp_banks[i-1].num_bytes;
+ if (bytes_avail > cmdline_memory_size) {
+ unsigned long slack = bytes_avail - cmdline_memory_size;
+
+ bytes_avail -= slack;
+ end_of_phys_memory -= slack;
+
+ sp_banks[i].num_bytes -= slack;
+ if (sp_banks[i].num_bytes == 0) {
sp_banks[i].base_addr = 0xdeadbeef;
- sp_banks[i].num_bytes = 0;
} else {
- sp_banks[i].num_bytes -=
- (end_of_phys_memory -
- cmdline_memory_size);
- end_of_phys_memory = cmdline_memory_size;
- sp_banks[++i].base_addr = 0xdeadbeef;
- sp_banks[i].num_bytes = 0;
+ sp_banks[i+1].num_bytes = 0;
+ sp_banks[i+1].base_addr = 0xdeadbeef;
}
break;
}
}
}
+ *pages_avail = bytes_avail >> PAGE_SHIFT;
+
/* Start with page aligned address of last symbol in kernel
* image. The kernel is hard mapped below PAGE_OFFSET in a
* 4MB locked TLB translation.
@@ -886,50 +873,40 @@
}
#endif
/* Initialize the boot-time allocator. */
-#ifdef DEBUG_BOOTMEM
- prom_printf("init_bootmem(spfn[%lx],bpfn[%lx],epfn[%lx])\n",
- start_pfn, bootmap_pfn, end_pfn);
-#endif
- bootmap_size = init_bootmem(bootmap_pfn, end_pfn);
+ bootmap_size = init_bootmem_node(0, bootmap_pfn, phys_base>>PAGE_SHIFT, end_pfn);
/* Now register the available physical memory with the
* allocator.
*/
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-#ifdef DEBUG_BOOTMEM
- prom_printf("free_bootmem: base[%lx] size[%lx]\n",
- sp_banks[i].base_addr,
- sp_banks[i].num_bytes);
-#endif
+ for (i = 0; sp_banks[i].num_bytes != 0; i++)
free_bootmem(sp_banks[i].base_addr,
sp_banks[i].num_bytes);
- }
- /* Reserve the kernel text/data/bss, the bootmem bootmap and initrd. */
-#ifdef DEBUG_BOOTMEM
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start)
- prom_printf("reserve_bootmem: base[%lx] size[%lx]\n",
- initrd_start, initrd_end - initrd_start);
-#endif
- prom_printf("reserve_bootmem: base[%lx] size[%lx]\n",
- phys_base, (start_pfn << PAGE_SHIFT) - phys_base);
- prom_printf("reserve_bootmem: base[%lx] size[%lx]\n",
- (bootmap_pfn << PAGE_SHIFT), bootmap_size);
-#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
- reserve_bootmem(initrd_start, initrd_end - initrd_start);
+ size = initrd_end - initrd_start;
+
+ /* Reserve the initrd image area. */
+ reserve_bootmem(initrd_start, size);
+ *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+
initrd_start += PAGE_OFFSET;
initrd_end += PAGE_OFFSET;
}
#endif
- reserve_bootmem(phys_base, (start_pfn << PAGE_SHIFT) - phys_base);
- reserve_bootmem((bootmap_pfn << PAGE_SHIFT), bootmap_size);
+ /* Reserve the kernel text/data/bss. */
+ size = (start_pfn << PAGE_SHIFT) - phys_base;
+ reserve_bootmem(phys_base, size);
+ *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ /* Reserve the bootmem map. We do not account for it
+ * in pages_avail because we will release that memory
+ * in free_all_bootmem.
+ */
+ size = bootmap_size;
+ reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
+ *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
-#ifdef DEBUG_BOOTMEM
- prom_printf("init_bootmem: return end_pfn[%lx]\n", end_pfn);
-#endif
return end_pfn;
}
@@ -946,7 +923,7 @@
extern unsigned int sparc64_vpte_patchme2[1];
unsigned long alias_base = phys_base + PAGE_OFFSET;
unsigned long second_alias_page = 0;
- unsigned long pt, flags, end_pfn;
+ unsigned long pt, flags, end_pfn, pages_avail;
unsigned long shift = alias_base - ((unsigned long)&empty_zero_page);
set_bit(0, mmu_context_bmap);
@@ -1001,7 +978,8 @@
flushi((long)&sparc64_vpte_patchme1[0]);
/* Setup bootmem... */
- last_valid_pfn = end_pfn = bootmem_init();
+ pages_avail = 0;
+ last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
#ifdef CONFIG_SUN_SERIAL
/* This does not logically belong here, but we need to
@@ -1039,10 +1017,20 @@
flush_tlb_all();
{
- unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0};
+ unsigned long zones_size[MAX_NR_ZONES];
+ unsigned long zholes_size[MAX_NR_ZONES];
+ unsigned long npages;
+ int znum;
+
+ for (znum = 0; znum < MAX_NR_ZONES; znum++)
+ zones_size[znum] = zholes_size[znum] = 0;
+
+ npages = end_pfn - (phys_base >> PAGE_SHIFT);
+ zones_size[ZONE_DMA] = npages;
+ zholes_size[ZONE_DMA] = npages - pages_avail;
- zones_size[ZONE_DMA] = end_pfn;
- free_area_init(zones_size);
+ free_area_init_node(0, NULL, zones_size,
+ phys_base, zholes_size);
}
device_scan();
@@ -1139,9 +1127,6 @@
struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
int i;
-#ifdef DEBUG_BOOTMEM
- prom_printf("taint_real_pages: Rescan sp_banks[].\n");
-#endif
for (i = 0; i < SPARC_PHYS_BANKS; i++) {
saved_sp_banks[i].base_addr =
sp_banks[i].base_addr;
@@ -1176,10 +1161,6 @@
goto do_next_page;
}
}
-#ifdef DEBUG_BOOTMEM
- prom_printf("taint: Page went away, reserve page %lx.\n",
- old_start);
-#endif
reserve_bootmem(old_start, PAGE_SIZE);
do_next_page:
@@ -1188,70 +1169,6 @@
}
}
-void __init free_mem_map_range(struct page *first, struct page *last)
-{
- first = (struct page *) PAGE_ALIGN((unsigned long)first);
- last = (struct page *) ((unsigned long)last & PAGE_MASK);
-#ifdef DEBUG_BOOTMEM
- prom_printf("[%p,%p] ", first, last);
-#endif
- while (first < last) {
- ClearPageReserved(mem_map + MAP_NR(first));
- set_page_count(mem_map + MAP_NR(first), 1);
- free_page((unsigned long)first);
- num_physpages++;
-
- first = (struct page *)((unsigned long)first + PAGE_SIZE);
- }
-}
-
-/* Walk through holes in sp_banks regions, if the mem_map array
- * areas representing those holes consume a page or more, free
- * up such pages. This helps a lot on machines where physical
- * ram is configured such that it begins at some hugh value.
- *
- * The sp_banks array is sorted by base address.
- */
-void __init free_unused_mem_map(void)
-{
- int i;
-
-#ifdef DEBUG_BOOTMEM
- prom_printf("free_unused_mem_map: ");
-#endif
- for (i = 0; sp_banks[i].num_bytes; i++) {
- if (i == 0) {
- struct page *first, *last;
-
- first = mem_map;
- last = &mem_map[sp_banks[i].base_addr >> PAGE_SHIFT];
- free_mem_map_range(first, last);
- } else {
- struct page *first, *last;
- unsigned long prev_end;
-
- prev_end = sp_banks[i-1].base_addr +
- sp_banks[i-1].num_bytes;
- prev_end = PAGE_ALIGN(prev_end);
- first = &mem_map[prev_end >> PAGE_SHIFT];
- last = &mem_map[sp_banks[i].base_addr >> PAGE_SHIFT];
-
- free_mem_map_range(first, last);
-
- if (!sp_banks[i+1].num_bytes) {
- prev_end = sp_banks[i].base_addr +
- sp_banks[i].num_bytes;
- first = &mem_map[prev_end >> PAGE_SHIFT];
- last = &mem_map[last_valid_pfn];
- free_mem_map_range(first, last);
- }
- }
- }
-#ifdef DEBUG_BOOTMEM
- prom_printf("\n");
-#endif
-}
-
void __init mem_init(void)
{
unsigned long codepages, datapages, initpages;
@@ -1279,16 +1196,10 @@
taint_real_pages();
- max_mapnr = last_valid_pfn;
+ max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT);
high_memory = __va(last_valid_pfn << PAGE_SHIFT);
-#ifdef DEBUG_BOOTMEM
- prom_printf("mem_init: Calling free_all_bootmem().\n");
-#endif
num_physpages = free_all_bootmem();
-#if 0
- free_unused_mem_map();
-#endif
codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
@@ -1317,19 +1228,6 @@
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10),
PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
-
- /* NOTE NOTE NOTE NOTE
- * Please keep track of things and make sure this
- * always matches the code in mm/page_alloc.c -DaveM
- */
- i = nr_free_pages() >> 7;
- if (i < 48)
- i = 48;
- if (i > 256)
- i = 256;
- freepages.min = i;
- freepages.low = i << 1;
- freepages.high = freepages.low + i;
}
void free_initmem (void)
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)