Commit 301f67bd authored by Andrew Morton, committed by Linus Torvalds

[PATCH] uninline somethings in fs/*.c

- Don't inline mpage_writepage().

  Even though it had but a single call site, uninlining this function
  took mpage.o from 4529 bytes of text down to 3985 (gcc-2.95.3).

- Various buffer.c tweaks: text size goes from 13233 bytes down to 12977.
parent 62a34f2d
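
Note (illustrative aside, not part of the commit): the pattern applied throughout the diff below is to drop an explicit "inline" from a static helper, even one with a single call site, and let the compiler make the inlining decision itself. A minimal, hypothetical sketch of that shape follows; the function names are invented for illustration and do not appear in the kernel sources.

/*
 * Hypothetical example, not kernel code: a single-call-site helper left
 * as plain "static" so the compiler may choose whether to inline it.
 */
static void zero_fill(int *buf, int n)
{
        int i;

        for (i = 0; i < n; i++)
                buf[i] = 0;
}

int reset_buffer(int *buf, int n)
{
        if (!buf || n <= 0)
                return -1;
        zero_fill(buf, n);              /* the helper's only call site */
        return 0;
}

Even with a single caller, a compiler is generally free to inline such a helper anyway when it judges that profitable; forcing "inline" only removes its ability to back off, which is the text-size effect measured in the changelog above.
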
fs/buffer.c

@@ -142,7 +142,7 @@ void __wait_on_buffer(struct buffer_head * bh)
 	finish_wait(wqh, &wait);
 }
 
-static inline void
+static void
 __set_page_buffers(struct page *page, struct buffer_head *head)
 {
 	if (page_has_buffers(page))
@@ -152,7 +152,7 @@ __set_page_buffers(struct page *page, struct buffer_head *head)
 	page->private = (unsigned long)head;
 }
 
-static inline void
+static void
 __clear_page_buffers(struct page *page)
 {
 	ClearPagePrivate(page);
@@ -594,14 +594,14 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
  * PageLocked prevents anyone from starting writeback of a page which is
  * under read I/O (PageWriteback is only ever set against a locked page).
  */
-inline void mark_buffer_async_read(struct buffer_head *bh)
+void mark_buffer_async_read(struct buffer_head *bh)
 {
 	bh->b_end_io = end_buffer_async_read;
 	set_buffer_async_read(bh);
 }
 EXPORT_SYMBOL(mark_buffer_async_read);
 
-inline void mark_buffer_async_write(struct buffer_head *bh)
+void mark_buffer_async_write(struct buffer_head *bh)
 {
 	bh->b_end_io = end_buffer_async_write;
 	set_buffer_async_write(bh);
@@ -960,7 +960,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
 /*
  * Initialise the state of a blockdev page's buffers.
  */
-static /*inline*/ void
+static void
 init_page_buffers(struct page *page, struct block_device *bdev,
 			int block, int size)
 {
@@ -989,7 +989,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
  *
  * This is user purely for blockdev mappings.
  */
-static /*inline*/ struct page *
+static struct page *
 grow_dev_page(struct block_device *bdev, unsigned long block,
 		unsigned long index, int size)
 {
@@ -1416,7 +1416,7 @@ EXPORT_SYMBOL(set_bh_page);
 /*
  * Called when truncating a buffer on a page completely.
  */
-static /* inline */ void discard_buffer(struct buffer_head * bh)
+static inline void discard_buffer(struct buffer_head * bh)
 {
 	lock_buffer(bh);
 	clear_buffer_dirty(bh);
@@ -2451,7 +2451,7 @@ static inline int buffer_busy(struct buffer_head *bh)
 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
 }
 
-static inline int
+static int
 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 {
 	struct buffer_head *head = page_buffers(page);
fs/mpage.c

@@ -317,7 +317,7 @@ EXPORT_SYMBOL(mpage_readpage);
  * written, so it can intelligently allocate a suitably-sized BIO.  For now,
  * just allocate full-size (16-page) BIOs.
  */
-static inline struct bio *
+static struct bio *
 mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 	sector_t *last_block_in_bio, int *ret)
 {