Commit 24c03d47 authored by Harvey Harrison, committed by Linus Torvalds

block: remove remaining __FUNCTION__ occurrences

__FUNCTION__ is gcc specific, use __func__
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e37d05da
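
Background to the change, with a minimal standalone example (not part of the commit): __func__ is a predefined identifier mandated by C99, while __FUNCTION__ is a gcc extension kept only for backward compatibility, so portable code should use the former. Both expand to the name of the enclosing function:

#include <stdio.h>

static void report(void)
{
	/* __func__ is standard C99: expands to the enclosing function name */
	printf("%s: standard spelling\n", __func__);

	/* __FUNCTION__ is a gcc-specific alias; it prints the same name
	 * here but is not guaranteed by the C standard */
	printf("%s: gcc-specific spelling\n", __FUNCTION__);
}

int main(void)
{
	report();
	return 0;
}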
@@ -26,8 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "%s: prepare_flush_fn required\n",
-		       __FUNCTION__);
+		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
 		return -EINVAL;
 	}
...
@@ -136,7 +136,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		if (unlikely(nbytes > bio->bi_size)) {
 			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-			       __FUNCTION__, nbytes, bio->bi_size);
+			       __func__, nbytes, bio->bi_size);
 			nbytes = bio->bi_size;
 		}
@@ -1566,8 +1566,7 @@ static int __end_that_request_first(struct request *req, int error,
 		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
 			blk_dump_rq_flags(req, "__end_that");
 			printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-			       __FUNCTION__, bio->bi_idx,
-			       bio->bi_vcnt);
+			       __func__, bio->bi_idx, bio->bi_vcnt);
 			break;
 		}
...
@@ -168,8 +168,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_sectors);
 	}
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -196,8 +196,8 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 	q->max_phys_segments = max_segments;
@@ -220,8 +220,8 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 	q->max_hw_segments = max_segments;
@@ -241,8 +241,8 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_size);
 	}
 	q->max_segment_size = max_size;
@@ -357,8 +357,8 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
-		       mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n",
+		       __func__, mask);
 	}
 	q->seg_boundary_mask = mask;
...
@@ -112,7 +112,7 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
 	if (q && depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
 		printk(KERN_ERR "%s: adjusted depth to %d\n",
-		       __FUNCTION__, depth);
+		       __func__, depth);
 	}
 	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
@@ -296,13 +296,13 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	if (unlikely(bqt->tag_index[tag] == NULL))
 		printk(KERN_ERR "%s: tag %d is missing\n",
-		       __FUNCTION__, tag);
+		       __func__, tag);
 	bqt->tag_index[tag] = NULL;
 	if (unlikely(!test_bit(tag, bqt->tag_map))) {
 		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-		       __FUNCTION__, tag);
+		       __func__, tag);
 		return;
 	}
 	/*
@@ -340,7 +340,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
-		       __FUNCTION__, rq,
+		       __func__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
 		BUG();
 	}
...
@@ -57,7 +57,7 @@ enum {
 #undef BSG_DEBUG
 #ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
 #else
 #define dprintk(fmt, args...)
 #endif
...
@@ -650,7 +650,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
-		       __FUNCTION__, where);
+		       __func__, where);
 		BUG();
 	}
@@ -808,8 +808,7 @@ struct request *elv_next_request(struct request_queue *q)
 			rq->cmd_flags |= REQ_QUIET;
 			end_queued_request(rq, 0);
 		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
-			       ret);
+			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
 			break;
 		}
 	}
...