patch-2.3.23 linux/mm/vmscan.c
Next file: linux/net/core/dst.c
Previous file: linux/mm/vmalloc.c
Back to the patch index
Back to the overall index
- Lines: 225
- Date: Tue Oct 19 10:22:19 1999
- Orig file: v2.3.22/linux/mm/vmscan.c
- Orig date: Sat Oct 9 11:47:50 1999
diff -u --recursive --new-file v2.3.22/linux/mm/vmscan.c linux/mm/vmscan.c
@@ -17,7 +17,7 @@
#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/init.h>
-#include <linux/bigmem.h>
+#include <linux/highmem.h>
#include <asm/pgtable.h>
@@ -34,23 +34,16 @@
*/
static int try_to_swap_out(struct vm_area_struct* vma, unsigned long address, pte_t * page_table, int gfp_mask)
{
- pte_t pte;
- unsigned long entry;
- unsigned long page_addr;
+ pte_t pte, entry;
struct page * page;
pte = *page_table;
if (!pte_present(pte))
goto out_failed;
- page_addr = pte_page(pte);
- if (MAP_NR(page_addr) >= max_mapnr)
+ page = pte_page(pte);
+ if (page-mem_map >= max_mapnr)
goto out_failed;
- page = mem_map + MAP_NR(page_addr);
- spin_lock(&vma->vm_mm->page_table_lock);
- if (pte_val(pte) != pte_val(*page_table))
- goto out_failed_unlock;
-
/* Don't look at this pte if it's been accessed recently. */
if (pte_young(pte)) {
/*
@@ -59,14 +52,14 @@
*/
set_pte(page_table, pte_mkold(pte));
set_bit(PG_referenced, &page->flags);
- goto out_failed_unlock;
+ goto out_failed;
}
if (PageReserved(page)
|| PageLocked(page)
|| ((gfp_mask & __GFP_DMA) && !PageDMA(page))
- || (!(gfp_mask & __GFP_BIGMEM) && PageBIGMEM(page)))
- goto out_failed_unlock;
+ || (!(gfp_mask & __GFP_HIGHMEM) && PageHighMem(page)))
+ goto out_failed;
/*
* Is the page already in the swap cache? If so, then
@@ -77,14 +70,14 @@
* memory, and we should just continue our scan.
*/
if (PageSwapCache(page)) {
- entry = page->offset;
+ entry = get_pagecache_pte(page);
swap_duplicate(entry);
- set_pte(page_table, __pte(entry));
+ set_pte(page_table, entry);
drop_pte:
vma->vm_mm->rss--;
flush_tlb_page(vma, address);
__free_page(page);
- goto out_failed_unlock;
+ goto out_failed;
}
/*
@@ -111,7 +104,7 @@
* locks etc.
*/
if (!(gfp_mask & __GFP_IO))
- goto out_failed_unlock;
+ goto out_failed;
/*
* Ok, it's really dirty. That means that
@@ -136,9 +129,9 @@
if (vma->vm_ops && vma->vm_ops->swapout) {
int error;
pte_clear(page_table);
- spin_unlock(&vma->vm_mm->page_table_lock);
- flush_tlb_page(vma, address);
vma->vm_mm->rss--;
+ flush_tlb_page(vma, address);
+ vmlist_access_unlock(vma->vm_mm);
error = vma->vm_ops->swapout(vma, page);
if (!error)
goto out_free_success;
@@ -153,15 +146,15 @@
* page with that swap entry.
*/
entry = acquire_swap_entry(page);
- if (!entry)
- goto out_failed_unlock; /* No swap space left */
+ if (!pte_val(entry))
+ goto out_failed; /* No swap space left */
- if (!(page = prepare_bigmem_swapout(page)))
- goto out_swap_free_unlock;
+ if (!(page = prepare_highmem_swapout(page)))
+ goto out_swap_free;
vma->vm_mm->rss--;
- set_pte(page_table, __pte(entry));
- spin_unlock(&vma->vm_mm->page_table_lock);
+ set_pte(page_table, entry);
+ vmlist_access_unlock(vma->vm_mm);
flush_tlb_page(vma, address);
swap_duplicate(entry); /* One for the process, one for the swap cache */
@@ -175,13 +168,9 @@
out_free_success:
__free_page(page);
return 1;
-out_failed_unlock:
- spin_unlock(&vma->vm_mm->page_table_lock);
-out_failed:
- return 0;
-out_swap_free_unlock:
+out_swap_free:
swap_free(entry);
- spin_unlock(&vma->vm_mm->page_table_lock);
+out_failed:
return 0;
}
@@ -208,7 +197,7 @@
if (pmd_none(*dir))
return 0;
if (pmd_bad(*dir)) {
- printk("swap_out_pmd: bad pmd (%08lx)\n", pmd_val(*dir));
+ pmd_ERROR(*dir);
pmd_clear(dir);
return 0;
}
@@ -227,7 +216,7 @@
return result;
address += PAGE_SIZE;
pte++;
- } while (address < end);
+ } while (address && (address < end));
return 0;
}
@@ -239,7 +228,7 @@
if (pgd_none(*dir))
return 0;
if (pgd_bad(*dir)) {
- printk("swap_out_pgd: bad pgd (%08lx)\n", pgd_val(*dir));
+ pgd_ERROR(*dir);
pgd_clear(dir);
return 0;
}
@@ -247,7 +236,7 @@
pmd = pmd_offset(dir, address);
pgd_end = (address + PGDIR_SIZE) & PGDIR_MASK;
- if (end > pgd_end)
+ if (pgd_end && (end > pgd_end))
end = pgd_end;
do {
@@ -256,7 +245,7 @@
return result;
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
- } while (address < end);
+ } while (address && (address < end));
return 0;
}
@@ -272,13 +261,15 @@
pgdir = pgd_offset(vma->vm_mm, address);
end = vma->vm_end;
- while (address < end) {
+ if (address >= end)
+ BUG();
+ do {
int result = swap_out_pgd(vma, pgdir, address, end, gfp_mask);
if (result)
return result;
address = (address + PGDIR_SIZE) & PGDIR_MASK;
pgdir++;
- }
+ } while (address && (address < end));
return 0;
}
@@ -293,8 +284,10 @@
address = mm->swap_address;
/*
- * Find the proper vm-area
+ * Find the proper vm-area after freezing the vma chain
+ * and ptes.
*/
+ vmlist_access_lock(mm);
vma = find_vma(mm, address);
if (vma) {
if (address < vma->vm_start)
@@ -310,6 +303,7 @@
address = vma->vm_start;
}
}
+ vmlist_access_unlock(mm);
/* We didn't find anything for the process */
mm->swap_cnt = 0;
@@ -502,8 +496,8 @@
*/
do {
/* kswapd is critical to provide GFP_ATOMIC
- allocations (not GFP_BIGMEM ones). */
- if (nr_free_pages - nr_free_bigpages >= freepages.high)
+ allocations (not GFP_HIGHMEM ones). */
+ if (nr_free_pages - nr_free_highpages >= freepages.high)
break;
if (!do_try_to_free_pages(GFP_KSWAPD))
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)