patch-2.3.99-pre6 linux/mm/page_alloc.c
- Lines: 60
- Date: Wed Apr 26 15:06:25 2000
- Orig file: v2.3.99-pre5/linux/mm/page_alloc.c
- Orig date: Tue Apr 11 15:09:25 2000
diff -u --recursive --new-file v2.3.99-pre5/linux/mm/page_alloc.c linux/mm/page_alloc.c
--- v2.3.99-pre5/linux/mm/page_alloc.c	Tue Apr 11 15:09:25 2000
+++ linux/mm/page_alloc.c	Wed Apr 26 15:06:25 2000
@@ -25,7 +25,7 @@
#endif
int nr_swap_pages = 0;
-int nr_lru_pages;
+int nr_lru_pages = 0;
pg_data_t *pgdat_list = (pg_data_t *)0;
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
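Aside on this first hunk: file-scope variables in C have static storage duration and are zero-initialized even without an explicit initializer, so the change above is stylistic rather than functional - it makes nr_lru_pages match the explicit "= 0" already used on nr_swap_pages one line up. A two-line illustration (the names are mine, not the kernel's):

    int implicit;       /* static storage duration: guaranteed to start at 0 */
    int explicit = 0;   /* same value, intent made visible */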
@@ -273,6 +273,8 @@
struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
{
zone_t **zone = zonelist->zones;
+ int gfp_mask = zonelist->gfp_mask;
+ static int low_on_memory;
/*
* If this is a recursive call, we'd better
@@ -282,6 +284,11 @@
if (current->flags & PF_MEMALLOC)
goto allocate_ok;
+ /* If we're a memory hog, unmap some pages */
+ if (current->hog && low_on_memory &&
+ (gfp_mask & __GFP_WAIT))
+ swap_out(4, gfp_mask);
+
/*
* (If anyone calls gfp from interrupts nonatomically then it
* will sooner or later tripped up by a schedule().)
@@ -299,11 +306,13 @@
/* Are we supposed to free memory? Don't make it worse.. */
if (!z->zone_wake_kswapd && z->free_pages > z->pages_low) {
struct page *page = rmqueue(z, order);
+ low_on_memory = 0;
if (page)
return page;
}
}
+ low_on_memory = 1;
/*
* Ok, no obvious zones were available, start
* balancing things a bit..
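Taken together, the additions in this hunk and the previous one form a simple hysteresis. A static low_on_memory flag is cleared whenever some zone still passes its watermark check on the fast path, and set when every zone refuses; while it is set, any task the kernel has flagged as a memory hog (current->hog) that is also allowed to sleep (__GFP_WAIT) must first call swap_out(4, gfp_mask) to unmap some of its own pages before it may allocate more. The sketch below is a minimal reconstruction under those assumptions, not the kernel's code: the flag, the hog test and the swap_out() call are the patch's own, but the loop is condensed, and zone_has_room(), take_pages() and balance_and_retry() are hypothetical stand-ins for fast-path logic the patch leaves untouched.

    static int low_on_memory;   /* shared by all callers, hence the hysteresis */

    struct page * alloc_sketch(zonelist_t *zonelist, unsigned long order)
    {
            zone_t **zone = zonelist->zones;
            int gfp_mask = zonelist->gfp_mask;
            zone_t *z;

            /* A known hog that may sleep pays up front: it unmaps some
             * of its own pages before taking more (from the patch). */
            if (current->hog && low_on_memory && (gfp_mask & __GFP_WAIT))
                    swap_out(4, gfp_mask);

            while ((z = *zone++) != NULL) {
                    if (zone_has_room(z)) {              /* hypothetical check */
                            struct page *page = take_pages(z, order);
                            low_on_memory = 0;           /* fast path delivered */
                            if (page)
                                    return page;
                    }
            }
            low_on_memory = 1;   /* every zone refused: remember the shortage */
            return balance_and_retry(zonelist, order);   /* hypothetical slow path */
    }

Because the flag is static, one failed pass through the zone list throttles every later hog allocation system-wide until some allocation finds room again; note that the patch clears the flag as soon as a zone passes the watermark test, even before rmqueue() confirms a page.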
@@ -530,6 +539,7 @@
freepages.min += i;
freepages.low += i * 2;
freepages.high += i * 3;
+ memlist_init(&lru_cache);
/*
* Some architectures (with lots of mem and discontinous memory
@@ -609,7 +619,6 @@
unsigned long bitmap_size;
memlist_init(&zone->free_area[i].free_list);
- memlist_init(&zone->lru_cache);
mask += mask;
size = (size + ~mask) & mask;
bitmap_size = size >> i;
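The last two hunks relocate the LRU list rather than change behaviour within it: the per-zone head zone->lru_cache disappears from the per-zone init loop, and a single global lru_cache is initialized once, next to the freepages watermarks. A structural sketch, assuming the 2.3.99-era memlist_init() list-head initializer; the zone fields shown are illustrative, and only lru_cache plus the memlist_init() call appear in the patch:

    /* before: one LRU head per zone, set up inside the per-zone loop */
    typedef struct zone_struct {
            struct list_head lru_cache;   /* removed by this patch */
            /* ... free_area lists, watermarks ... */
    } zone_t;

    /* after: one machine-wide LRU, the natural partner of the global
     * nr_lru_pages counter made explicit in the first hunk */
    struct list_head lru_cache;

    void free_area_init_sketch(void)
    {
            memlist_init(&lru_cache);     /* done exactly once at boot */
    }

Presumably the point is that page aging can now walk a single list instead of iterating zones, at the cost of per-zone locality.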