patch-2.3.99-pre9 linux/fs/buffer.c
- Lines: 49
- Date: Sat May 20 10:00:57 2000
- Orig file: v2.3.99-pre8/linux/fs/buffer.c
- Orig date: Fri May 12 14:18:55 2000
diff -u --recursive --new-file v2.3.99-pre8/linux/fs/buffer.c linux/fs/buffer.c
--- v2.3.99-pre8/linux/fs/buffer.c	Fri May 12 14:18:55 2000
+++ linux/fs/buffer.c	Sat May 20 10:00:57 2000
@@ -1324,7 +1324,7 @@
* instead.
*/
if (!offset) {
- if (!try_to_free_buffers(page)) {
+ if (!try_to_free_buffers(page, 0)) {
atomic_inc(&buffermem_pages);
return 0;
}
@@ -2121,15 +2121,17 @@
* This all is required so that we can free up memory
* later.
*/
-static void sync_page_buffers(struct buffer_head *bh)
+static void sync_page_buffers(struct buffer_head *bh, int wait)
{
- struct buffer_head * tmp;
+ struct buffer_head * tmp = bh;

- tmp = bh;
do {
struct buffer_head *p = tmp;
tmp = tmp->b_this_page;
- if (buffer_dirty(p) && !buffer_locked(p))
+ if (buffer_locked(p)) {
+ if (wait)
+ __wait_on_buffer(p);
+ } else if (buffer_dirty(p))
ll_rw_block(WRITE, 1, &p);
} while (tmp != bh);
}
@@ -2151,7 +2153,7 @@
* obtain a reference to a buffer head within a page. So we must
* lock out all of these paths to cleanly toss the page.
*/
-int try_to_free_buffers(struct page * page)
+int try_to_free_buffers(struct page * page, int wait)
{
struct buffer_head * tmp, * bh = page->buffers;
int index = BUFSIZE_INDEX(bh->b_size);
@@ -2201,7 +2203,7 @@
spin_unlock(&free_list[index].lock);
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
- sync_page_buffers(bh);
+ sync_page_buffers(bh, wait);
return 0;
}
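
What the patch does: try_to_free_buffers() grows a second argument, so every
caller has to be updated (the first hunk updates the page-cache path with
wait == 0, preserving its old non-blocking behaviour). In sync_page_buffers(),
a locked buffer means its I/O is already in flight; with wait == 0 it is
skipped as before, while wait != 0 now blocks in __wait_on_buffer() until
that I/O completes, so that the memory can actually be freed on a later
attempt (per the comment above the function).

For reference, here is sync_page_buffers() as it reads after the patch,
reassembled from the context and `+' lines of the second hunk (a
reconstruction from the diff above, not an independently verified copy of
the 2.3.99-pre9 source; the inline comments are editorial):

static void sync_page_buffers(struct buffer_head *bh, int wait)
{
	struct buffer_head * tmp = bh;

	do {
		struct buffer_head *p = tmp;
		tmp = tmp->b_this_page;	/* buffers on a page form a circular list */
		if (buffer_locked(p)) {
			/* I/O already in flight: block on it only if asked to */
			if (wait)
				__wait_on_buffer(p);
		} else if (buffer_dirty(p))
			/* dirty and idle: start asynchronous writeback */
			ll_rw_block(WRITE, 1, &p);
	} while (tmp != bh);
}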