patch-2.3.99-pre6 linux/mm/filemap.c
- Lines: 263
- Date: Wed Apr 26 15:06:25 2000
- Orig file: v2.3.99-pre5/linux/mm/filemap.c
- Orig date: Tue Apr 11 15:09:25 2000
diff -u --recursive --new-file v2.3.99-pre5/linux/mm/filemap.c linux/mm/filemap.c
@@ -44,6 +44,7 @@
atomic_t page_cache_size = ATOMIC_INIT(0);
unsigned int page_hash_bits;
struct page **page_hash_table;
+struct list_head lru_cache;
spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
/*
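The hunk above replaces the per-zone LRU lists (the zone->lru_cache references removed in the shrink_mmap() hunks below) with one global list. Since lru_cache is declared without an initializer, it presumably gets set up at boot; a minimal sketch of what that implies (the init site is not part of this patch):

	INIT_LIST_HEAD(&lru_cache);	/* e.g. from page_cache_init() */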
@@ -77,6 +78,15 @@
atomic_dec(&page_cache_size);
}
+static inline int sync_page(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+
+ if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
+ return mapping->a_ops->sync_page(page);
+ return 0;
+}
+
/*
* Remove a page from the page cache and free it. Caller has to make
* sure the page is locked and that nobody else uses it - or that usage
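The new sync_page() method gives each address_space a hook for kicking outstanding I/O on one of its pages; later hunks use it to replace the blanket run_task_queue(&tq_disk) calls in the page-wait paths. A minimal sketch of a block-backed implementation that just reproduces the old behaviour (the helper name is an assumption, not part of this diff):

	static int block_sync_page(struct page *page)
	{
		/* the disk task queue is global, so plain block-device
		 * mappings have no per-page work to do here */
		run_task_queue(&tq_disk);
		return 0;
	}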
@@ -87,6 +97,9 @@
if (!PageLocked(page))
PAGE_BUG(page);
+ /* Initiate completion of any async operations */
+ sync_page(page);
+
spin_lock(&pagecache_lock);
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
@@ -99,6 +112,7 @@
struct list_head *head, *curr;
struct page * page;
+ repeat:
head = &inode->i_mapping->pages;
spin_lock(&pagecache_lock);
curr = head->next;
@@ -110,14 +124,13 @@
/* We cannot invalidate a locked page */
if (TryLockPage(page))
continue;
+ spin_unlock(&pagecache_lock);
lru_cache_del(page);
- remove_page_from_inode_queue(page);
- remove_page_from_hash_queue(page);
- page->mapping = NULL;
+ remove_inode_page(page);
UnlockPage(page);
-
page_cache_release(page);
+ goto repeat;
}
spin_unlock(&pagecache_lock);
}
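Note the shape of the rewritten invalidate_inode_pages() loop: remove_inode_page() now takes pagecache_lock itself (see the hunk above), so the walker has to drop the lock before calling it, and because the list can change while the lock is down, it restarts from the head. The idiom, distilled (a sketch of the control flow, not literal code):

	repeat:
		spin_lock(&pagecache_lock);
		/* walk the list; when a removable, trylockable page is found: */
		spin_unlock(&pagecache_lock);
		remove_inode_page(page);	/* retakes pagecache_lock itself */
		goto repeat;			/* list may have changed meanwhile */
		/* when the walk completes with nothing left to do: */
		spin_unlock(&pagecache_lock);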
@@ -149,11 +162,16 @@
/* page wholly truncated - free it */
if (offset >= start) {
+ if (TryLockPage(page)) {
+ spin_unlock(&pagecache_lock);
+ get_page(page);
+ wait_on_page(page);
+ put_page(page);
+ goto repeat;
+ }
get_page(page);
spin_unlock(&pagecache_lock);
- lock_page(page);
-
if (!page->buffers || block_flushpage(page, 0))
lru_cache_del(page);
@@ -191,11 +209,13 @@
continue;
/* partial truncate, clear end of page */
+ if (TryLockPage(page)) {
+ spin_unlock(&pagecache_lock);
+ goto repeat;
+ }
get_page(page);
spin_unlock(&pagecache_lock);
- lock_page(page);
-
memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
if (page->buffers)
block_flushpage(page, partial);
@@ -208,6 +228,9 @@
*/
UnlockPage(page);
page_cache_release(page);
+ get_page(page);
+ wait_on_page(page);
+ put_page(page);
goto repeat;
}
spin_unlock(&pagecache_lock);
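truncate_inode_pages() gets the same treatment: rather than taking a reference and sleeping in lock_page() mid-walk, the new code only proceeds with pages it can lock immediately. For a contended page it drops the spinlock, pins the page so it cannot be freed underneath us, waits out the current lock holder, and rescans so every decision is made against fresh list state. Annotated:

	if (TryLockPage(page)) {		/* someone else holds the page lock */
		spin_unlock(&pagecache_lock);	/* never sleep with a spinlock held */
		get_page(page);			/* pin the page across the sleep */
		wait_on_page(page);
		put_page(page);
		goto repeat;			/* rescan: the list may have changed */
	}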
@@ -215,46 +238,55 @@
int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
{
- int ret = 0, count;
+ int ret = 0, loop = 0, count;
LIST_HEAD(young);
LIST_HEAD(old);
LIST_HEAD(forget);
struct list_head * page_lru, * dispose;
- struct page * page;
-
+ struct page * page = NULL;
+ struct zone_struct * p_zone;
+ int maxloop = 256 >> priority;
+
if (!zone)
BUG();
- count = nr_lru_pages / (priority+1);
+ count = nr_lru_pages >> priority;
+ if (!count)
+ return ret;
spin_lock(&pagemap_lru_lock);
-
- while (count > 0 && (page_lru = zone->lru_cache.prev) != &zone->lru_cache) {
+again:
+ /* we need pagemap_lru_lock for list_del() ... subtle code below */
+ while (count > 0 && (page_lru = lru_cache.prev) != &lru_cache) {
page = list_entry(page_lru, struct page, lru);
list_del(page_lru);
+ p_zone = page->zone;
- dispose = &zone->lru_cache;
- if (test_and_clear_bit(PG_referenced, &page->flags))
- /* Roll the page at the top of the lru list,
- * we could also be more aggressive putting
- * the page in the young-dispose-list, so
- * avoiding to free young pages in each pass.
- */
- goto dispose_continue;
-
+ /*
+ * These two tests are there to make sure we don't free too
+ * many pages from the "wrong" zone. We free some anyway,
+ * they are the least recently used pages in the system.
+ * When we don't free them, leave them in &old.
+ */
dispose = &old;
- /* don't account passes over not DMA pages */
- if (zone && (!memclass(page->zone, zone)))
+ if (p_zone != zone && (loop > (maxloop / 4) ||
+ p_zone->free_pages > p_zone->pages_high))
goto dispose_continue;
- count--;
-
+ /* The page is in use, or was used very recently, put it in
+ * &young to make sure that we won't try to free it the next
+ * time */
dispose = &young;
- /* avoid unscalable SMP locking */
+ if (test_and_clear_bit(PG_referenced, &page->flags))
+ goto dispose_continue;
+
+ count--;
if (!page->buffers && page_count(page) > 1)
goto dispose_continue;
+ /* Page not used -> free it; if that fails -> &old */
+ dispose = &old;
if (TryLockPage(page))
goto dispose_continue;
@@ -327,6 +359,7 @@
list_add(page_lru, dispose);
continue;
+ /* we're holding pagemap_lru_lock, so we can just loop again */
dispose_continue:
list_add(page_lru, dispose);
}
@@ -342,9 +375,14 @@
/* nr_lru_pages needs the spinlock */
nr_lru_pages--;
+ loop++;
+ /* wrong zone? not looped too often? roll again... */
+ if (page->zone != zone && loop < maxloop)
+ goto again;
+
out:
- list_splice(&young, &zone->lru_cache);
- list_splice(&old, zone->lru_cache.prev);
+ list_splice(&young, &lru_cache);
+ list_splice(&old, lru_cache.prev);
spin_unlock(&pagemap_lru_lock);
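shrink_mmap() now works off the single global lru_cache. A page from the wrong zone no longer just costs a pass: it is still freed if its own zone is short on free pages and we have not looped too often, and otherwise parked on &old. The scan budget, straight from the code above:

	count   = nr_lru_pages >> priority;	/* pages to examine this call */
	maxloop = 256 >> priority;		/* bound on "goto again" rescans */
	/* e.g. priority 0: examine the whole LRU, up to 256 passes;
	 *      priority 6: examine 1/64 of it, at most 4 passes */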
@@ -467,6 +505,9 @@
struct page *alias;
unsigned long flags;
+ if (PageLocked(page))
+ BUG();
+
flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty));
page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);
get_page(page);
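The new BUG() guards the flag rewrite just below it: page->flags is rebuilt wholesale, with PG_locked set unconditionally, so a page arriving here already locked would silently end up with two owners of the lock.

	/* the guarded line: PG_locked is asserted regardless of the
	 * page's previous state */
	page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);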
@@ -574,7 +615,7 @@
add_wait_queue(&page->wait, &wait);
do {
- run_task_queue(&tq_disk);
+ sync_page(page);
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (!PageLocked(page))
break;
@@ -619,7 +660,7 @@
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- run_task_queue(&tq_disk);
+ sync_page(page);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue(&page->wait, &wait);
@@ -669,7 +710,7 @@
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- run_task_queue(&tq_disk);
+ sync_page(page);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue(&page->wait, &wait);
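All three page-wait loops now kick only the mapping that owns the page instead of draining the global disk queue. For reference, the full __wait_on_page-style loop after this change (a sketch; the tail from schedule() onward is filled in from the usual pattern and is not shown in this diff):

	add_wait_queue(&page->wait, &wait);
	do {
		sync_page(page);	/* push this mapping's I/O only */
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!PageLocked(page))
			break;
		schedule();
	} while (PageLocked(page));
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&page->wait, &wait);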
@@ -1496,7 +1537,7 @@
* mmap_sem is held.
*/
lock_page(page);
- result = inode->i_mapping->a_ops->writepage(dentry, page);
+ result = inode->i_mapping->a_ops->writepage(file, dentry, page);
UnlockPage(page);
return result;
}
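The call-site change implies a new writepage prototype; presumably the struct file pointer is there so filesystems can reach per-open state (network filesystems want it for things like credentials). Reconstructed from this call site alone:

	int (*writepage)(struct file *, struct dentry *, struct page *);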
@@ -1707,8 +1748,8 @@
error = vma->vm_ops->sync(vma, start, end-start, flags);
if (!error && (flags & MS_SYNC)) {
struct file * file = vma->vm_file;
- if (file)
- error = file_fsync(file, file->f_dentry);
+ if (file && file->f_op && file->f_op->fsync)
+ error = file->f_op->fsync(file, file->f_dentry);
}
return error;
}
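Finally, MS_SYNC in msync() now invokes the filesystem's own fsync method instead of the generic file_fsync() helper; the added checks mean a file whose f_op provides no fsync is simply skipped. How a filesystem hooks in (a sketch; the names are placeholders):

	static struct file_operations example_fops = {
		fsync:	example_fsync,	/* called as fsync(file, dentry) */
		/* ... other methods ... */
	};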