patch-2.2.17 linux/mm/vmscan.c
- Lines: 74
- Date: Mon Sep 4 18:39:28 2000
- Orig file: v2.2.16/mm/vmscan.c
- Orig date: Mon Sep 4 18:37:10 2000
diff -u --recursive --new-file v2.2.16/mm/vmscan.c linux/mm/vmscan.c
@@ -333,7 +333,6 @@
 
 	for (; counter >= 0; counter--) {
 		max_cnt = 0;
-		assign = 0;
 		pbest = NULL;
 	select:
 		read_lock(&tasklist_lock);
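Note: with this hunk, assign is no longer cleared on each pass of the candidate-selection loop; its initialization has to happen outside the lines shown here (presumably once per call, so the swap-count refresh it controls is requested less often).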
@@ -378,17 +377,12 @@
  * cluster them so that we get good swap-out behaviour. See
  * the "free_memory()" macro for details.
  */
-#define FLUSH_COUNT 8
 static int do_try_to_free_pages(unsigned int gfp_mask)
 {
-	int priority, count, swapcount;
-	int flushcount = FLUSH_COUNT;
+	int priority;
 	int ret = 0;
-
-	/* Kswapd does nothing but freeing pages so we can do big bites. */
-	if (gfp_mask == GFP_KSWAPD)
-		flushcount = SWAP_CLUSTER_MAX;
-	count = flushcount;
+	int swapcount;
+	int count = SWAP_CLUSTER_MAX;
 
 	lock_kernel();
 
@@ -413,21 +407,15 @@
 		}
 
 		/* Then, try to page stuff out.. */
-		swapcount = flushcount;
+		swapcount = count;
 		while (swap_out(priority, gfp_mask)) {
+			ret = 1;
 			if (!--swapcount)
 				break;
 		}
 
 		shrink_dcache_memory(priority, gfp_mask);
 	} while (--priority >= 0);
-
-	/* End with a shrink_mmap() to make sure we free something. */
-	while (shrink_mmap(0, gfp_mask)) {
-		ret = 1;
-		if (!--count)
-			goto done;
-	}
 done:
 	unlock_kernel();
 
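Pieced together, the two hunks above leave do_try_to_free_pages() looking roughly as sketched below. The freeing passes at the top of the do-while loop are outside these hunks and only summarized in a comment, so treat this as an approximation of the resulting 2.2.17 code, not a verbatim copy:

	static int do_try_to_free_pages(unsigned int gfp_mask)
	{
		int priority;
		int ret = 0;
		int swapcount;
		int count = SWAP_CLUSTER_MAX;

		lock_kernel();

		priority = 6;	/* initial priority as in 2.2.16 (assumed unchanged) */
		do {
			/* ... unchanged freeing passes (shrink_mmap() etc.) that
			   set ret and "goto done" once count is used up ... */

			/* Then, try to page stuff out.. */
			swapcount = count;
			while (swap_out(priority, gfp_mask)) {
				ret = 1;	/* a successful swap-out now counts as progress */
				if (!--swapcount)
					break;
			}

			shrink_dcache_memory(priority, gfp_mask);
		} while (--priority >= 0);
	done:
		unlock_kernel();
		return ret;
	}

Two behavioural changes fall out of this: the FLUSH_COUNT throttle (8 for ordinary allocators, SWAP_CLUSTER_MAX only for kswapd) is replaced by a flat SWAP_CLUSTER_MAX budget for every caller, and the unconditional shrink_mmap(0, gfp_mask) sweep at the end is dropped, with swap_out() progress now reported through ret instead.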
@@ -507,18 +495,11 @@
 		 * the processes needing more memory will wake us
 		 * up on a more timely basis.
 		 */
 
-		int failed = 0;
-sleep:
 		interruptible_sleep_on(&kswapd_wait);
-		/* Enough free pages? -> call do_try_to_free_pages only once. */
-		if (nr_free_pages > freepages.low) {
-			do_try_to_free_pages(GFP_KSWAPD);
-			goto sleep;
-		}
-		/* Not enough free pages? -> free pages agressively. */
+
 		while (nr_free_pages < freepages.high)
 		{
-			if (do_try_to_free_pages(GFP_KSWAPD) && failed++ < 10)
+			if (do_try_to_free_pages(GFP_KSWAPD))
 			{
 				if (tsk->need_resched)
 					schedule();
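The kswapd() hunk collapses the old two-speed sleep loop into a single one. A sketch of the result, reconstructed only from the context shown above (everything past schedule() lies outside the hunk and is elided):

		interruptible_sleep_on(&kswapd_wait);

		while (nr_free_pages < freepages.high)
		{
			if (do_try_to_free_pages(GFP_KSWAPD))
			{
				if (tsk->need_resched)
					schedule();
				/* ... rest of the loop body unchanged ... */
			}
			/* ... */
		}

Previously, kswapd called do_try_to_free_pages() only once and went straight back to sleep whenever free memory was still above freepages.low, and the failed counter capped the aggressive loop at ten consecutive passes; now it simply keeps calling do_try_to_free_pages() until nr_free_pages reaches freepages.high, rescheduling between successful passes.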