patch-2.3.16 linux/fs/buffer.c
- Lines: 132
- Date: Tue Aug 31 11:30:48 1999
- Orig file: v2.3.15/linux/fs/buffer.c
- Orig date: Thu Aug 26 13:05:40 1999
diff -u --recursive --new-file v2.3.15/linux/fs/buffer.c linux/fs/buffer.c
@@ -146,8 +146,8 @@
atomic_inc(&bh->b_count);
add_wait_queue(&bh->b_wait, &wait);
repeat:
- tsk->state = TASK_UNINTERRUPTIBLE;
run_task_queue(&tq_disk);
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (buffer_locked(bh)) {
schedule();
goto repeat;
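The point of the reordering is that the task is already in TASK_UNINTERRUPTIBLE when it re-tests buffer_locked(), so a wake_up() issued by the I/O completion between the test and schedule() cannot be missed. A minimal sketch of the whole wait loop this hunk sits in (reconstructed around the context lines above, not the verbatim function):

	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	atomic_inc(&bh->b_count);
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	run_task_queue(&tq_disk);			/* kick any queued disk I/O */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);	/* sleep state set before the test */
	if (buffer_locked(bh)) {
		schedule();				/* woken by unlock_buffer() */
		goto repeat;
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&bh->b_wait, &wait);
	atomic_dec(&bh->b_count);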
@@ -698,7 +698,6 @@
unsigned long flags;
struct buffer_head *tmp;
struct page *page;
- int free;
mark_buffer_uptodate(bh, uptodate);
@@ -718,7 +717,7 @@
* deemed complete once all buffers have been visited
* (b_count==0) and are now unlocked. We must make sure that
* only the _last_ buffer that decrements its count is the one
- * that free's the page..
+ * that unlocks the page..
*/
spin_lock_irqsave(&page_uptodate_lock, flags);
unlock_buffer(bh);
@@ -743,26 +742,15 @@
/*
* Run the hooks that have to be done when a page I/O has completed.
- *
- * Note - we need to test the flags before we unlock the page, but
- * we must not actually free the page until after the unlock!
*/
if (test_and_clear_bit(PG_decr_after, &page->flags))
atomic_dec(&nr_async_pages);
- if (test_and_clear_bit(PG_free_swap_after, &page->flags))
- swap_free(page->offset);
-
- free = test_and_clear_bit(PG_free_after, &page->flags);
-
if (page->owner != (void *)-1)
PAGE_BUG(page);
page->owner = current;
UnlockPage(page);
- if (free)
- __free_page(page);
-
return;
still_busy:
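These three hunks make end_buffer_io_async() purely a completion notifier: the page is no longer freed or swap-released here, only unlocked, and the comment spells out that the last buffer to finish is the one that does it. In outline the completion path now looks roughly like this (a simplified sketch assembled from the context above; the exact busy test and some bookkeeping are abbreviated):

	spin_lock_irqsave(&page_uptodate_lock, flags);
	unlock_buffer(bh);
	atomic_dec(&bh->b_count);

	/* If any async buffer on this page is still in flight, whoever
	 * finishes it will take care of the page later on. */
	for (tmp = bh->b_this_page; tmp != bh; tmp = tmp->b_this_page)
		if (tmp->b_end_io == end_buffer_io_async && buffer_locked(tmp))
			goto still_busy;
	spin_unlock_irqrestore(&page_uptodate_lock, flags);

	/* Last buffer: page I/O is complete.  Do the bookkeeping and
	 * unlock the page -- freeing it is now the caller's business. */
	if (test_and_clear_bit(PG_decr_after, &page->flags))
		atomic_dec(&nr_async_pages);
	page->owner = current;
	UnlockPage(page);
	return;

still_busy:
	spin_unlock_irqrestore(&page_uptodate_lock, flags);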
@@ -1100,7 +1088,6 @@
*/
static struct buffer_head * create_buffers(unsigned long page, unsigned long size, int async)
{
- DECLARE_WAITQUEUE(wait, current);
struct buffer_head *bh, *head;
long offset;
@@ -1165,14 +1152,7 @@
* Set our state for sleeping, then check again for buffer heads.
* This ensures we won't miss a wake_up from an interrupt.
*/
- add_wait_queue(&buffer_wait, &wait);
- current->state = TASK_UNINTERRUPTIBLE;
- if (nr_unused_buffer_heads < MAX_BUF_PER_PAGE) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
- remove_wait_queue(&buffer_wait, &wait);
- current->state = TASK_RUNNING;
+ wait_event(buffer_wait, nr_unused_buffer_heads >= MAX_BUF_PER_PAGE);
goto try_again;
}
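The deleted block (together with the DECLARE_WAITQUEUE removal above) is exactly the pattern wait_event() encapsulates: queue the task, enter TASK_UNINTERRUPTIBLE before testing the condition, and sleep until the condition holds, which also removes the need for the SCHED_YIELD nudge. Roughly, the call expands to something like this (an approximation for illustration, not the verbatim macro definition):

	/* Approximately what wait_event(buffer_wait, condition) does: */
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&buffer_wait, &wait);
	for (;;) {
		set_task_state(current, TASK_UNINTERRUPTIBLE);
		if (nr_unused_buffer_heads >= MAX_BUF_PER_PAGE)
			break;		/* condition already true: no sleep */
		schedule();		/* woken by wake_up(&buffer_wait) */
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&buffer_wait, &wait);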
@@ -1238,7 +1218,7 @@
if (!PageLocked(page))
BUG();
if (!page->buffers)
- return 0;
+ return 1;
head = page->buffers;
bh = head;
@@ -1279,10 +1259,13 @@
*/
if (!offset) {
if (!try_to_free_buffers(page))
+ {
atomic_add(PAGE_CACHE_SIZE, &buffermem);
+ return 0;
+ }
}
- return 0;
+ return 1;
}
static void create_empty_buffers(struct page *page, struct inode *inode, unsigned long blocksize)
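Taken together, the two return-value hunks above turn the result into a success flag: 1 means the page ends up with no buffers attached (or never had any), 0 means try_to_free_buffers() could not release them yet, so a caller can tell whether the page itself is now reclaimable. Condensed, the tail of the changed function reads (same logic as the hunks, with the intent spelled out in comments):

	if (!offset) {
		/* Full truncate: try to strip every buffer off the page. */
		if (!try_to_free_buffers(page)) {
			/* Some buffer is still in use; the page keeps its
			 * buffers, so account it as buffer memory and
			 * report that the page cannot be dropped yet. */
			atomic_add(PAGE_CACHE_SIZE, &buffermem);
			return 0;
		}
	}
	return 1;	/* no buffers left on this page */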
@@ -1822,6 +1805,10 @@
for (pageind = 0; pageind < iobuf->nr_pages; pageind++) {
page = iobuf->pagelist[pageind];
map = iobuf->maplist[pageind];
+ if (map && PageBIGMEM(map)) {
+ err = -EIO;
+ goto error;
+ }
while (length > 0) {
blocknr = b[bufind++];
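The new check guards the per-page loop it sits in: this path drives I/O through temporary buffer heads whose data pointer must be a permanent kernel-virtual address, which a BIGMEM (high-memory) page does not have, so such pages are refused with -EIO before any block is mapped. In context (a sketch of the start of the page loop, abbreviated to the guard and the error path):

	for (pageind = 0; pageind < iobuf->nr_pages; pageind++) {
		page = iobuf->pagelist[pageind];
		map  = iobuf->maplist[pageind];

		/* Buffer heads need a permanent kernel mapping for their
		 * data; a BIGMEM page has none, so refuse the transfer. */
		if (map && PageBIGMEM(map)) {
			err = -EIO;
			goto error;
		}
		/* ... per-block buffer_head setup and submission follows ... */
	}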
@@ -2060,6 +2047,7 @@
static int grow_buffers(int size)
{
unsigned long page;
+ struct page * page_map;
struct buffer_head *bh, *tmp;
struct buffer_head * insert_point;
int isize;
@@ -2100,7 +2088,9 @@
free_list[isize].list = bh;
spin_unlock(&free_list[isize].lock);
- mem_map[MAP_NR(page)].buffers = bh;
+ page_map = mem_map + MAP_NR(page);
+ page_map->buffers = bh;
+ lru_cache_add(page_map);
atomic_add(PAGE_SIZE, &buffermem);
return 1;
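The grow_buffers() change is the visible part of the new page-LRU bookkeeping: the freshly allocated buffer page gets its struct page looked up once (the new page_map local declared in the previous hunk), the ring of buffer heads is attached to it, and lru_cache_add() puts it on the LRU so the page-reclaim code can see and age buffer-cache memory like any other page. The tail of the function therefore ends up as (a sketch based on the hunk, comments added):

	page_map = mem_map + MAP_NR(page);	/* struct page backing this buffer page */
	page_map->buffers = bh;			/* attach the ring of buffer heads */
	lru_cache_add(page_map);		/* make the page visible to page reclaim */
	atomic_add(PAGE_SIZE, &buffermem);	/* account it as buffer memory */
	return 1;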