patch-2.3.10 linux/mm/page_alloc.c
Next file: linux/mm/vmscan.c
Previous file: linux/mm/mremap.c
Back to the patch index
Back to the overall index
- Lines: 91
- Date: Fri Jul 2 15:10:36 1999
- Orig file: v2.3.9/linux/mm/page_alloc.c
- Orig date: Tue Jun 22 10:50:36 1999
diff -u --recursive --new-file v2.3.9/linux/mm/page_alloc.c linux/mm/page_alloc.c
@@ -345,90 +345,3 @@
}
return start_mem;
}
-
-/*
- * Primitive swap readahead code. We simply read an aligned block of
- * (1 << page_cluster) entries in the swap area. This method is chosen
- * because it doesn't cost us any seek time. We also make sure to queue
- * the 'original' request together with the readahead ones...
- */
-void swapin_readahead(unsigned long entry)
-{
- int i;
- struct page *new_page;
- unsigned long offset = SWP_OFFSET(entry);
- struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;
-
- offset = (offset >> page_cluster) << page_cluster;
-
- i = 1 << page_cluster;
- do {
- /* Don't read-ahead past the end of the swap area */
- if (offset >= swapdev->max)
- break;
- /* Don't block on I/O for read-ahead */
- if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster)
- break;
- /* Don't read in bad or busy pages */
- if (!swapdev->swap_map[offset])
- break;
- if (swapdev->swap_map[offset] == SWAP_MAP_BAD)
- break;
-
- /* Ok, do the async read-ahead now */
- new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset), 0);
- if (new_page != NULL)
- __free_page(new_page);
- offset++;
- } while (--i);
- return;
-}
-
-/*
- * The tests may look silly, but it essentially makes sure that
- * no other process did a swap-in on us just as we were waiting.
- *
- * Also, don't bother to add to the swap cache if this page-in
- * was due to a write access.
- */
-void swap_in(struct task_struct * tsk, struct vm_area_struct * vma,
- pte_t * page_table, unsigned long entry, int write_access)
-{
- unsigned long page;
- struct page *page_map = lookup_swap_cache(entry);
-
- if (!page_map) {
- swapin_readahead(entry);
- page_map = read_swap_cache(entry);
- }
- if (pte_val(*page_table) != entry) {
- if (page_map)
- free_page_and_swap_cache(page_address(page_map));
- return;
- }
- if (!page_map) {
- set_pte(page_table, BAD_PAGE);
- swap_free(entry);
- oom(tsk);
- return;
- }
-
- page = page_address(page_map);
- vma->vm_mm->rss++;
- tsk->min_flt++;
- swap_free(entry);
-
- if (!write_access || is_page_shared(page_map)) {
- set_pte(page_table, mk_pte(page, vma->vm_page_prot));
- return;
- }
-
- /*
- * The page is unshared and we're going to dirty it - so tear
- * down the swap cache and give exclusive access to the page to
- * this process.
- */
- delete_from_swap_cache(page_map);
- set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
- return;
-}
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)