patch-2.4.11-dontuse linux/fs/buffer.c
- Lines: 878
- Date: Sun Sep 30 12:05:19 2001
- Orig file: v2.4.10/linux/fs/buffer.c
- Orig date: Wed Sep 26 11:53:42 2001
diff -u --recursive --new-file v2.4.10/linux/fs/buffer.c linux/fs/buffer.c
@@ -52,22 +52,13 @@
#include <asm/bitops.h>
#include <asm/mmu_context.h>
-#define NR_SIZES 7
-static char buffersize_index[65] =
-{-1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
- 4, -1, -1, -1, -1, -1, -1, -1, -1,-1, -1, -1, -1, -1, -1, -1,
- 5, -1, -1, -1, -1, -1, -1, -1, -1,-1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1,-1, -1, -1, -1, -1, -1, -1,
- 6};
-
-#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
#define NR_RESERVED (10*MAX_BUF_PER_PAGE)
#define MAX_UNUSED_BUFFERS NR_RESERVED+20 /* don't ever have more than this
number of unused buffer heads */
/* Anti-deadlock ordering:
- * lru_list_lock > hash_table_lock > free_list_lock > unused_list_lock
+ * lru_list_lock > hash_table_lock > unused_list_lock
*/
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_inode_buffers)
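
The ordering comment above is the entire deadlock-avoidance scheme for these locks: any path that needs more than one of them must acquire outermost-first and release in reverse. A minimal userspace sketch of that discipline, with pthread mutexes standing in for the kernel spinlocks (the function and its body are hypothetical):

#include <pthread.h>

static pthread_mutex_t lru_list_lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hash_table_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t unused_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every multi-lock path obeys lru > hash > unused, so no two
 * paths can ever wait on each other in a cycle. */
static void remove_buffer_everywhere(void)
{
	pthread_mutex_lock(&lru_list_lock);	/* outermost */
	pthread_mutex_lock(&hash_table_lock);
	pthread_mutex_lock(&unused_list_lock);	/* innermost */

	/* ... unlink from lru list and hash chain, park the head ... */

	pthread_mutex_unlock(&unused_list_lock);
	pthread_mutex_unlock(&hash_table_lock);
	pthread_mutex_unlock(&lru_list_lock);
}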
@@ -90,13 +81,7 @@
static spinlock_t unused_list_lock = SPIN_LOCK_UNLOCKED;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
-struct bh_free_head {
- struct buffer_head *list;
- spinlock_t lock;
-};
-static struct bh_free_head free_list[NR_SIZES];
-
-static int grow_buffers(int size);
+static int grow_buffers(kdev_t dev, unsigned long block, int size);
static void __refile_buffer(struct buffer_head *);
/* This is used by some architectures to estimate available memory. */
@@ -481,12 +466,16 @@
((block) << (bh_hash_shift - 12))))
#define hash(dev,block) hash_table[(_hashfn(HASHDEV(dev),block) & bh_hash_mask)]
-static __inline__ void __hash_link(struct buffer_head *bh, struct buffer_head **head)
+static inline void __insert_into_hash_list(struct buffer_head *bh)
{
- if ((bh->b_next = *head) != NULL)
- bh->b_next->b_pprev = &bh->b_next;
+ struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
+ struct buffer_head *next = *head;
+
*head = bh;
bh->b_pprev = head;
+ bh->b_next = next;
+ if (next != NULL)
+ next->b_pprev = &bh->b_next;
}
static __inline__ void __hash_unlink(struct buffer_head *bh)
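
The rewritten __insert_into_hash_list() above is the kernel's pprev idiom: each node stores the address of whatever pointer currently points at it (the hash head or the previous node's b_next), so __hash_unlink() needs no back-walk and no special case for the head. A self-contained sketch of the pattern with illustrative names:

struct node {
	struct node *next;
	struct node **pprev;	/* address of the pointer that points at us */
};

static void chain_insert(struct node *n, struct node **head)
{
	struct node *first = *head;

	*head = n;
	n->pprev = head;
	n->next = first;
	if (first)
		first->pprev = &n->next;
}

static void chain_unlink(struct node *n)
{
	*n->pprev = n->next;		/* whoever pointed at us now skips us */
	if (n->next)
		n->next->pprev = n->pprev;
}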
@@ -503,6 +492,8 @@
{
struct buffer_head **bhp = &lru_list[blist];
+ if (bh->b_prev_free || bh->b_next_free) BUG();
+
if(!*bhp) {
*bhp = bh;
bh->b_prev_free = bh;
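
For context, the branch shown above only seeds an empty list; the insert then completes as the classic circular doubly-linked "insert before head", which places the buffer at the tail of the LRU ring. A sketch of the full operation, with illustrative names, mirroring the unchanged remainder of __insert_into_lru_list():

struct buf {
	struct buf *next_free, *prev_free;
};

static void lru_insert(struct buf **head, struct buf *b)
{
	if (!*head) {			/* empty ring: b becomes self-linked */
		*head = b;
		b->prev_free = b;
	}
	b->next_free = *head;
	b->prev_free = (*head)->prev_free;
	(*head)->prev_free->next_free = b;	/* old tail -> b */
	(*head)->prev_free = b;			/* b is the new tail */
}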
@@ -530,19 +521,6 @@
}
}
-static void __remove_from_free_list(struct buffer_head * bh, int index)
-{
- if(bh->b_next_free == bh)
- free_list[index].list = NULL;
- else {
- bh->b_prev_free->b_next_free = bh->b_next_free;
- bh->b_next_free->b_prev_free = bh->b_prev_free;
- if (free_list[index].list == bh)
- free_list[index].list = bh->b_next_free;
- }
- bh->b_next_free = bh->b_prev_free = NULL;
-}
-
/* must be called with both the hash_table_lock and the lru_list_lock
held */
static void __remove_from_queues(struct buffer_head *bh)
@@ -551,67 +529,28 @@
__remove_from_lru_list(bh, bh->b_list);
}
-static void __insert_into_queues(struct buffer_head *bh)
-{
- struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
-
- __hash_link(bh, head);
- __insert_into_lru_list(bh, bh->b_list);
-}
-
-/* This function must only run if there are no other
- * references _anywhere_ to this buffer head.
- */
-static void put_last_free(struct buffer_head * bh)
+struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
{
- struct bh_free_head *head = &free_list[BUFSIZE_INDEX(bh->b_size)];
- struct buffer_head **bhp = &head->list;
-
- bh->b_state = 0;
+ struct buffer_head *bh, **p = &hash(dev, block);
- spin_lock(&head->lock);
- bh->b_dev = B_FREE;
- if(!*bhp) {
- *bhp = bh;
- bh->b_prev_free = bh;
- }
- bh->b_next_free = *bhp;
- bh->b_prev_free = (*bhp)->b_prev_free;
- (*bhp)->b_prev_free->b_next_free = bh;
- (*bhp)->b_prev_free = bh;
- spin_unlock(&head->lock);
-}
-
-/*
- * Why like this, I hear you say... The reason is race-conditions.
- * As we don't lock buffers (unless we are reading them, that is),
- * something might happen to it while we sleep (ie a read-error
- * will force it bad). This shouldn't really happen currently, but
- * the code is ready.
- */
-static inline struct buffer_head * __get_hash_table(kdev_t dev, int block, int size)
-{
- struct buffer_head *bh = hash(dev, block);
+ read_lock(&hash_table_lock);
- for (; bh; bh = bh->b_next)
- if (bh->b_blocknr == block &&
- bh->b_size == size &&
- bh->b_dev == dev)
+ for (;;) {
+ bh = *p;
+ if (!bh)
break;
- if (bh)
+ p = &bh->b_next;
+ if (bh->b_blocknr != block)
+ continue;
+ if (bh->b_size != size)
+ continue;
+ if (bh->b_dev != dev)
+ continue;
get_bh(bh);
+ break;
+ }
- return bh;
-}
-
-struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
-{
- struct buffer_head *bh;
-
- read_lock(&hash_table_lock);
- bh = __get_hash_table(dev, block, size);
read_unlock(&hash_table_lock);
-
return bh;
}
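
The rewritten lookup holds hash_table_lock only across the chain walk and bumps the reference count before dropping it, so the buffer cannot be freed between lookup and use; get_bh() is an atomic increment, which is why a read lock suffices. A userspace sketch of the same shape (types, names, and the plain int counter are stand-ins):

#include <pthread.h>

struct buf {
	struct buf *next;
	int dev, block, size;
	int count;		/* the kernel's b_count is an atomic_t */
};

static pthread_rwlock_t hash_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct buf *lookup(struct buf *chain, int dev, int block, int size)
{
	struct buf *b;

	pthread_rwlock_rdlock(&hash_lock);
	for (b = chain; b; b = b->next) {
		if (b->block == block && b->size == size && b->dev == dev) {
			b->count++;	/* kernel: get_bh() is atomic, so
					 * a shared read lock is enough */
			break;
		}
	}
	pthread_rwlock_unlock(&hash_lock);
	return b;		/* NULL if the block is not hashed */
}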
@@ -688,10 +627,11 @@
we think the disk contains more recent information than the buffercache.
The update == 1 pass marks the buffers we need to update, the update == 2
pass does the actual I/O. */
-void __invalidate_buffers(kdev_t dev, int destroy_dirty_buffers, int update)
+void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
{
int i, nlist, slept;
struct buffer_head * bh, * bh_next;
+ kdev_t dev = to_kdev_t(bdev->bd_dev); /* will become bdev */
retry:
slept = 0;
@@ -722,33 +662,14 @@
/* All buffers in the lru lists are mapped */
if (!buffer_mapped(bh))
BUG();
+ if (buffer_dirty(bh))
+ printk("invalidate: dirty buffer\n");
if (!atomic_read(&bh->b_count)) {
if (destroy_dirty_buffers || !buffer_dirty(bh)) {
remove_inode_queue(bh);
- __remove_from_queues(bh);
- put_last_free(bh);
}
- } else if (update) {
- if ((update == 2) ^ buffer_uptodate(bh) &&
- (update == 2) ^ buffer_req(bh)) {
- write_unlock(&hash_table_lock);
- atomic_inc(&bh->b_count);
- spin_unlock(&lru_list_lock);
-
- if (update == 2) {
- ll_rw_block(READ, 1, &bh);
- wait_on_buffer(bh);
- } else {
- lock_buffer(bh);
- clear_bit(BH_Uptodate, &bh->b_state);
- clear_bit(BH_Req, &bh->b_state);
- unlock_buffer(bh);
- }
-
- atomic_dec(&bh->b_count);
- goto retry;
- }
- }
+ } else
+ printk("invalidate: busy buffer\n");
write_unlock(&hash_table_lock);
if (slept)
@@ -759,81 +680,18 @@
spin_unlock(&lru_list_lock);
if (slept)
goto retry;
+
+ /* Get rid of the page cache */
+ invalidate_inode_pages(bdev->bd_inode);
}
-void set_blocksize(kdev_t dev, int size)
+void __invalidate_buffers(kdev_t dev, int destroy_dirty_buffers)
{
- extern int *blksize_size[];
- int i, nlist, slept;
- struct buffer_head * bh, * bh_next;
-
- if (!blksize_size[MAJOR(dev)])
- return;
-
- /* Size must be a power of two, and between 512 and PAGE_SIZE */
- if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
- panic("Invalid blocksize passed to set_blocksize");
-
- if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
- blksize_size[MAJOR(dev)][MINOR(dev)] = size;
- return;
- }
- if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
- return;
- sync_buffers(dev, 2);
- blksize_size[MAJOR(dev)][MINOR(dev)] = size;
-
- retry:
- slept = 0;
- spin_lock(&lru_list_lock);
- for(nlist = 0; nlist < NR_LIST; nlist++) {
- bh = lru_list[nlist];
- if (!bh)
- continue;
- for (i = nr_buffers_type[nlist]; i > 0 ; bh = bh_next, i--) {
- bh_next = bh->b_next_free;
- if (bh->b_dev != dev || bh->b_size == size)
- continue;
- /* Unhashed? */
- if (!bh->b_pprev)
- continue;
- if (buffer_locked(bh)) {
- get_bh(bh);
- spin_unlock(&lru_list_lock);
- wait_on_buffer(bh);
- slept = 1;
- spin_lock(&lru_list_lock);
- put_bh(bh);
- }
-
- write_lock(&hash_table_lock);
- if (!atomic_read(&bh->b_count)) {
- if (buffer_dirty(bh))
- printk(KERN_WARNING
- "set_blocksize: dev %s buffer_dirty %lu size %hu\n",
- kdevname(dev), bh->b_blocknr, bh->b_size);
- remove_inode_queue(bh);
- __remove_from_queues(bh);
- put_last_free(bh);
- } else {
- if (atomic_set_buffer_clean(bh))
- __refile_buffer(bh);
- clear_bit(BH_Uptodate, &bh->b_state);
- printk(KERN_WARNING
- "set_blocksize: "
- "b_count %d, dev %s, block %lu, from %p\n",
- atomic_read(&bh->b_count), bdevname(bh->b_dev),
- bh->b_blocknr, __builtin_return_address(0));
- }
- write_unlock(&hash_table_lock);
- if (slept)
- goto out;
- }
+ struct block_device *bdev = bdget(dev);
+ if (bdev) {
+ invalidate_bdev(bdev, destroy_dirty_buffers);
+ bdput(bdev);
}
- out:
- spin_unlock(&lru_list_lock);
- if (slept)
- goto retry;
}
static void free_more_memory(void)
@@ -1137,57 +995,16 @@
*/
struct buffer_head * getblk(kdev_t dev, int block, int size)
{
- struct buffer_head * bh;
- int isize;
+ for (;;) {
+ struct buffer_head * bh;
-repeat:
- spin_lock(&lru_list_lock);
- write_lock(&hash_table_lock);
- bh = __get_hash_table(dev, block, size);
- if (bh)
- goto out;
-
- isize = BUFSIZE_INDEX(size);
- spin_lock(&free_list[isize].lock);
- bh = free_list[isize].list;
- if (bh) {
- __remove_from_free_list(bh, isize);
- atomic_set(&bh->b_count, 1);
- }
- spin_unlock(&free_list[isize].lock);
-
- /*
- * OK, FINALLY we know that this buffer is the only one of
- * its kind, we hold a reference (b_count>0), it is unlocked,
- * and it is clean.
- */
- if (bh) {
- init_buffer(bh, NULL, NULL);
- bh->b_dev = dev;
- bh->b_blocknr = block;
- bh->b_state = 1 << BH_Mapped;
+ bh = get_hash_table(dev, block, size);
+ if (bh)
+ return bh;
- /* Insert the buffer into the regular lists */
- __insert_into_queues(bh);
- out:
- write_unlock(&hash_table_lock);
- spin_unlock(&lru_list_lock);
- touch_buffer(bh);
- return bh;
+ if (!grow_buffers(dev, block, size))
+ free_more_memory();
}
-
- /*
- * If we block while refilling the free list, somebody may
- * create the buffer first ... search the hashes again.
- */
- write_unlock(&hash_table_lock);
- spin_unlock(&lru_list_lock);
-
- if (!grow_buffers(size))
- free_more_memory();
-
- /* FIXME: getblk should fail if there's no enough memory */
- goto repeat;
}
/* -1 -> no need to flush
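
With the free lists gone, getblk() is an infallible lookup-or-create loop: miss, grow the page cache, retry; if two tasks race, whoever hashes the buffers first wins and both find the same buffer on the next pass. Callers pair it with a read and a brelse(), essentially what bread() in this same file does; a sketch:

struct buffer_head *read_block(kdev_t dev, int block, int size)
{
	struct buffer_head *bh = getblk(dev, block, size);

	if (buffer_uptodate(bh))
		return bh;		/* already valid in the cache */
	ll_rw_block(READ, 1, &bh);	/* submit the read ... */
	wait_on_buffer(bh);		/* ... and sleep until it completes */
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);			/* I/O error: drop our reference */
	return NULL;
}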
@@ -1313,22 +1130,7 @@
*/
void __bforget(struct buffer_head * buf)
{
- /* grab the lru lock here to block bdflush. */
- spin_lock(&lru_list_lock);
- write_lock(&hash_table_lock);
- if (!atomic_dec_and_test(&buf->b_count) || buffer_locked(buf))
- goto in_use;
- __hash_unlink(buf);
- write_unlock(&hash_table_lock);
- remove_inode_queue(buf);
- __remove_from_lru_list(buf, buf->b_list);
- spin_unlock(&lru_list_lock);
- put_last_free(buf);
- return;
-
- in_use:
- write_unlock(&hash_table_lock);
- spin_unlock(&lru_list_lock);
+ __brelse(buf);
}
/**
@@ -1364,6 +1166,7 @@
if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
kmem_cache_free(bh_cachep, bh);
} else {
+ bh->b_dev = B_FREE;
bh->b_blocknr = -1;
bh->b_this_page = NULL;
@@ -1416,20 +1219,6 @@
}
spin_unlock(&unused_list_lock);
}
-#if 0
- /*
- * (Pending further analysis ...)
- * Ordinary (non-async) requests can use a different memory priority
- * to free up pages. Any swapping thus generated will use async
- * buffer heads.
- */
- if(!async &&
- (bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL)) != NULL) {
- memset(bh, 0, sizeof(*bh));
- init_waitqueue_head(&bh->b_wait);
- return bh;
- }
-#endif
return NULL;
}
@@ -1470,7 +1259,7 @@
if (!bh)
goto no_grow;
- bh->b_dev = B_FREE; /* Flag as unused */
+ bh->b_dev = NODEV;
bh->b_this_page = head;
head = bh;
@@ -1524,7 +1313,10 @@
goto try_again;
}
-static void unmap_buffer(struct buffer_head * bh)
+/*
+ * Called when truncating a buffer on a page completely.
+ */
+static void discard_buffer(struct buffer_head * bh)
{
if (buffer_mapped(bh)) {
mark_buffer_clean(bh);
@@ -1564,7 +1356,7 @@
* is this block fully flushed?
*/
if (offset <= curr_off)
- unmap_buffer(bh);
+ discard_buffer(bh);
curr_off = next_off;
bh = next;
} while (bh != head);
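
The offset <= curr_off test decides what a partial-page truncate discards: only buffers that start at or past the truncation point. A runnable worked example, assuming a 4096-byte page split into 1024-byte buffers and a truncate to byte 2500:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned blocksize = 1024, offset = 2500;	/* truncate point */
	unsigned curr_off;

	for (curr_off = 0; curr_off < PAGE_SIZE; curr_off += blocksize)
		printf("buffer at %4u: %s\n", curr_off,
		       offset <= curr_off ? "discard" : "keep");
	/* keeps 0, 1024 and 2048 (the last one is only partially valid;
	 * zeroing its tail is block_truncate_page's job), discards 3072 */
	return 0;
}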
@@ -1580,11 +1372,8 @@
* instead.
*/
if (!offset) {
- if (!try_to_free_buffers(page, 0)) {
- if (drop_pagecache)
- atomic_inc(&buffermem_pages);
+ if (!try_to_free_buffers(page, 0))
return 0;
- }
}
return 1;
@@ -1667,10 +1456,10 @@
BUG();
if (!page->buffers)
- create_empty_buffers(page, inode->i_dev, inode->i_sb->s_blocksize);
+ create_empty_buffers(page, inode->i_dev, 1 << inode->i_blkbits);
head = page->buffers;
- block = page->index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+ block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
bh = head;
i = 0;
@@ -1732,12 +1521,12 @@
struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
char *kaddr = kmap(page);
- blocksize = inode->i_sb->s_blocksize;
+ blocksize = 1 << inode->i_blkbits;
if (!page->buffers)
create_empty_buffers(page, inode->i_dev, blocksize);
head = page->buffers;
- bbits = inode->i_sb->s_blocksize_bits;
+ bbits = inode->i_blkbits;
block = page->index << (PAGE_CACHE_SHIFT - bbits);
for(bh = head, block_start = 0; bh != head || !block_start;
@@ -1800,7 +1589,7 @@
unsigned blocksize;
struct buffer_head *bh, *head;
- blocksize = inode->i_sb->s_blocksize;
+ blocksize = 1 << inode->i_blkbits;
for(bh = head = page->buffers, block_start = 0;
bh != head || !block_start;
@@ -1849,14 +1638,14 @@
if (!PageLocked(page))
PAGE_BUG(page);
- blocksize = inode->i_sb->s_blocksize;
+ blocksize = 1 << inode->i_blkbits;
if (!page->buffers)
create_empty_buffers(page, inode->i_dev, blocksize);
head = page->buffers;
- blocks = PAGE_CACHE_SIZE >> inode->i_sb->s_blocksize_bits;
- iblock = page->index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
- lblock = (inode->i_size+blocksize-1) >> inode->i_sb->s_blocksize_bits;
+ blocks = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ iblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ lblock = (inode->i_size+blocksize-1) >> inode->i_blkbits;
bh = head;
nr = 0;
i = 0;
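
All of these hunks replace superblock-derived block sizes with the per-inode i_blkbits, and everything reduces to shift arithmetic. A self-contained sketch of the two index computations used above (the macros assume the common 4K page):

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

/* first block covered by page `index` */
static unsigned long first_block(unsigned long index, unsigned blkbits)
{
	return index << (PAGE_CACHE_SHIFT - blkbits);
}

/* number of blocks holding data, i.e. i_size rounded up */
static unsigned long last_block(unsigned long long i_size, unsigned blkbits)
{
	unsigned blocksize = 1U << blkbits;
	return (i_size + blocksize - 1) >> blkbits;
}

/* e.g. blkbits = 10 (1K blocks): page index 3 covers blocks 12..15,
 * and i_size = 5000 gives last_block() = 5, so blocks 0..4 hold data */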
@@ -1923,7 +1712,7 @@
unsigned long pgpos;
long status;
unsigned zerofrom;
- unsigned blocksize = inode->i_sb->s_blocksize;
+ unsigned blocksize = 1 << inode->i_blkbits;
char *kaddr;
while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
@@ -2008,6 +1797,14 @@
return err;
}
+int block_commit_write(struct page *page, unsigned from, unsigned to)
+{
+ struct inode *inode = page->mapping->host;
+ __block_commit_write(inode,page,from,to);
+ kunmap(page);
+ return 0;
+}
+
int generic_commit_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
@@ -2032,7 +1829,7 @@
struct buffer_head *bh;
int err;
- blocksize = inode->i_sb->s_blocksize;
+ blocksize = 1 << inode->i_blkbits;
length = offset & (blocksize - 1);
/* Block boundary? Nothing to do */
@@ -2040,7 +1837,7 @@
return 0;
length = blocksize - length;
- iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+ iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
page = grab_cache_page(mapping, index);
err = -ENOMEM;
@@ -2141,47 +1938,6 @@
return tmp.b_blocknr;
}
-int generic_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsigned long blocknr, int blocksize, get_block_t * get_block)
-{
- int i, nr_blocks, retval;
- unsigned long * blocks = iobuf->blocks;
-
- nr_blocks = iobuf->length / blocksize;
- /* build the blocklist */
- for (i = 0; i < nr_blocks; i++, blocknr++) {
- struct buffer_head bh;
-
- bh.b_state = 0;
- bh.b_dev = inode->i_dev;
- bh.b_size = blocksize;
-
- retval = get_block(inode, blocknr, &bh, rw == READ ? 0 : 1);
- if (retval)
- goto out;
-
- if (rw == READ) {
- if (buffer_new(&bh))
- BUG();
- if (!buffer_mapped(&bh)) {
- /* there was an hole in the filesystem */
- blocks[i] = -1UL;
- continue;
- }
- } else {
- if (buffer_new(&bh))
- unmap_underlying_metadata(&bh);
- if (!buffer_mapped(&bh))
- BUG();
- }
- blocks[i] = bh.b_blocknr;
- }
-
- retval = brw_kiovec(rw, 1, &iobuf, inode->i_dev, iobuf->blocks, blocksize);
-
- out:
- return retval;
-}
-
/*
* IO completion routine for a buffer_head being used for kiobuf IO: we
* can't dispatch the kiobuf callback until io_count reaches 0.
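
The io_count scheme the comment describes is plain completion counting: every sub-buffer I/O decrements a shared counter, and only the decrement that reaches zero fires the final callback. A userspace sketch with C11 atomics standing in for the kernel's atomic_dec_and_test() (types and names are illustrative):

#include <stdatomic.h>

struct kio {
	atomic_int io_count;		/* one count per in-flight buffer */
	void (*end_io)(struct kio *);
};

static void sub_io_done(struct kio *k)
{
	/* atomic_fetch_sub returns the old value: 1 means we were last */
	if (atomic_fetch_sub(&k->io_count, 1) == 1)
		k->end_io(k);
}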
@@ -2311,7 +2067,6 @@
}
tmp = bhs[bhind++];
- tmp->b_dev = B_FREE;
tmp->b_size = size;
set_bh_page(tmp, map, offset);
tmp->b_this_page = tmp;
@@ -2447,67 +2202,129 @@
return err;
}
+static inline void link_dev_buffers(struct page * page, struct buffer_head *head)
+{
+ struct buffer_head *bh, *tail;
+
+ bh = head;
+ do {
+ tail = bh;
+ bh = bh->b_this_page;
+ } while (bh);
+ tail->b_this_page = head;
+ page->buffers = head;
+ page_cache_get(page);
+}
+
+/*
+ * Create the page-cache page that contains the requested block
+ */
+static struct page * grow_dev_page(struct block_device *bdev, unsigned long index, int size)
+{
+ struct page * page;
+ struct buffer_head *bh;
+
+ page = find_or_create_page(bdev->bd_inode->i_mapping, index, GFP_NOFS);
+ if (IS_ERR(page))
+ return NULL;
+
+ if (!PageLocked(page))
+ BUG();
+
+ bh = page->buffers;
+ if (bh) {
+ if (bh->b_size == size)
+ return page;
+ if (!try_to_free_buffers(page, GFP_NOFS))
+ goto failed;
+ }
+
+ bh = create_buffers(page, size, 0);
+ if (!bh)
+ goto failed;
+ link_dev_buffers(page, bh);
+ return page;
+
+failed:
+ UnlockPage(page);
+ page_cache_release(page);
+ return NULL;
+}
+
+static void hash_page_buffers(struct page *page, kdev_t dev, int block, int size)
+{
+ struct buffer_head *head = page->buffers;
+ struct buffer_head *bh = head;
+ unsigned int uptodate;
+
+ uptodate = 1 << BH_Mapped;
+ if (Page_Uptodate(page))
+ uptodate |= 1 << BH_Uptodate;
+
+ write_lock(&hash_table_lock);
+ do {
+ if (!(bh->b_state & (1 << BH_Mapped))) {
+ init_buffer(bh, NULL, NULL);
+ bh->b_dev = dev;
+ bh->b_blocknr = block;
+ bh->b_state = uptodate;
+ }
+
+ /* Insert the buffer into the hash lists if necessary */
+ if (!bh->b_pprev)
+ __insert_into_hash_list(bh);
+
+ block++;
+ bh = bh->b_this_page;
+ } while (bh != head);
+ write_unlock(&hash_table_lock);
+}
+
/*
* Try to increase the number of buffers available: the size argument
* is used to determine what kind of buffers we want.
*/
-static int grow_buffers(int size)
+static int grow_buffers(kdev_t dev, unsigned long block, int size)
{
struct page * page;
- struct buffer_head *bh, *tmp;
- struct buffer_head * insert_point;
- int isize;
+ struct block_device *bdev;
+ unsigned long index;
+ int sizebits;
if ((size & 511) || (size > PAGE_SIZE)) {
printk(KERN_ERR "VFS: grow_buffers: size = %d\n",size);
return 0;
}
+ sizebits = -1;
+ do {
+ sizebits++;
+ } while ((size << sizebits) < PAGE_SIZE);
- page = alloc_page(GFP_NOFS);
- if (!page)
- goto out;
- LockPage(page);
- bh = create_buffers(page, size, 0);
- if (!bh)
- goto no_buffer_head;
-
- isize = BUFSIZE_INDEX(size);
+ index = block >> sizebits;
+ block = index << sizebits;
- spin_lock(&free_list[isize].lock);
- insert_point = free_list[isize].list;
- tmp = bh;
- while (1) {
- if (insert_point) {
- tmp->b_next_free = insert_point->b_next_free;
- tmp->b_prev_free = insert_point;
- insert_point->b_next_free->b_prev_free = tmp;
- insert_point->b_next_free = tmp;
- } else {
- tmp->b_prev_free = tmp;
- tmp->b_next_free = tmp;
- }
- insert_point = tmp;
- if (tmp->b_this_page)
- tmp = tmp->b_this_page;
- else
- break;
+ bdev = bdget(kdev_t_to_nr(dev));
+ if (!bdev) {
+ printk("No block device for %s\n", kdevname(dev));
+ BUG();
}
- tmp->b_this_page = bh;
- free_list[isize].list = bh;
- spin_unlock(&free_list[isize].lock);
-
- page->buffers = bh;
- page->flags &= ~(1 << PG_referenced);
- lru_cache_add(page);
- UnlockPage(page);
- atomic_inc(&buffermem_pages);
- return 1;
-no_buffer_head:
+ /* Create a page with the proper size buffers.. */
+ page = grow_dev_page(bdev, index, size);
+
+ /* This is "wrong" - talk to Al Viro */
+ atomic_dec(&bdev->bd_count);
+ if (!page)
+ return 0;
+
+ /* Hash in the buffers on the hash list */
+ hash_page_buffers(page, dev, block, size);
UnlockPage(page);
page_cache_release(page);
-out:
- return 0;
+
+ /* We hashed up this page, so increment buffermem */
+ atomic_inc(&buffermem_pages);
+ return 1;
}
static int sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
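
The sizebits arithmetic in the new grow_buffers() maps a block number to the page that holds it: sizebits is log2 of blocks-per-page, the page index is the block number with those bits shifted off, and shifting back rounds the block down to the first one on that page. A runnable sketch of the computation:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long block = 1234567;
	int size = 1024, sizebits = -1;

	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);	/* 1024 -> 2 */

	unsigned long index = block >> sizebits;	/* page to create  */
	unsigned long first = index << sizebits;	/* its first block */

	printf("index=%lu first=%lu\n", index, first);	/* 308641, 1234564 */
	return 0;
}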
@@ -2522,7 +2339,7 @@
ll_rw_block(WRITE, 1, &p);
tryagain = 0;
} else if (buffer_locked(p)) {
- if (gfp_mask & __GFP_WAIT) {
+ if (gfp_mask & __GFP_WAITBUF) {
wait_on_buffer(p);
tryagain = 1;
} else
@@ -2557,12 +2374,10 @@
int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
{
struct buffer_head * tmp, * bh = page->buffers;
- int index = BUFSIZE_INDEX(bh->b_size);
cleaned_buffers_try_again:
spin_lock(&lru_list_lock);
write_lock(&hash_table_lock);
- spin_lock(&free_list[index].lock);
tmp = bh;
do {
if (buffer_busy(tmp))
@@ -2572,18 +2387,18 @@
spin_lock(&unused_list_lock);
tmp = bh;
+
+ /* if this buffer was hashed, this page counts as buffermem */
+ if (bh->b_pprev)
+ atomic_dec(&buffermem_pages);
do {
struct buffer_head * p = tmp;
tmp = tmp->b_this_page;
- /* The buffer can be either on the regular
- * queues or on the free list..
- */
- if (p->b_dev != B_FREE) {
- remove_inode_queue(p);
- __remove_from_queues(p);
- } else
- __remove_from_free_list(p, index);
+ if (p->b_dev == B_FREE) BUG();
+
+ remove_inode_queue(p);
+ __remove_from_queues(p);
__put_unused_buffer_head(p);
} while (tmp != bh);
spin_unlock(&unused_list_lock);
@@ -2594,14 +2409,12 @@
/* And free the page */
page->buffers = NULL;
page_cache_release(page);
- spin_unlock(&free_list[index].lock);
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
return 1;
busy_buffer_page:
/* Uhhuh, start writeback so that we don't end up with all dirty pages */
- spin_unlock(&free_list[index].lock);
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
if (gfp_mask & __GFP_IO) {
@@ -2713,12 +2526,6 @@
/* Setup hash chains. */
for(i = 0; i < nr_hash; i++)
hash_table[i] = NULL;
-
- /* Setup free lists. */
- for(i = 0; i < NR_SIZES; i++) {
- free_list[i].list = NULL;
- free_list[i].lock = SPIN_LOCK_UNLOCKED;
- }
/* Setup lru lists. */
for(i = 0; i < NR_LIST; i++)