pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
block: remove remaining __FUNCTION__ occurrences
author Harvey Harrison <harvey.harrison@gmail.com>
Thu, 1 May 2008 11:35:17 +0000 (04:35 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 1 May 2008 15:04:02 +0000 (08:04 -0700)
__FUNCTION__ is gcc specific, use __func__

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
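
For background: __func__ is a predefined identifier in standard C (since C99), while __FUNCTION__ is a gcc extension kept only for backward compatibility, so both produce the same output under gcc. A minimal sketch of the behavior (queue_init is a hypothetical function, not part of this patch):

#include <stdio.h>

static void queue_init(void)
{
	/*
	 * C99 6.4.2.2: __func__ behaves as if the function began with
	 *   static const char __func__[] = "queue_init";
	 * __FUNCTION__ is a gcc-specific alias for the same string,
	 * which is why portable kernel code standardizes on __func__.
	 */
	printf("%s: initialized\n", __func__);
}

int main(void)
{
	queue_init();	/* prints: queue_init: initialized */
	return 0;
}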
block/blk-barrier.c
block/blk-core.c
block/blk-settings.c
block/blk-tag.c
block/bsg.c
block/elevator.c

block/blk-barrier.c
index 66e55288178c0840ba44ff0a859fc72c817baf89..a09ead19f9c5702a1ad76d709c54969176fe9e94 100644
@@ -26,8 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
        if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
            prepare_flush_fn == NULL) {
-               printk(KERN_ERR "%s: prepare_flush_fn required\n",
-                                                               __FUNCTION__);
+               printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
                return -EINVAL;
        }
 
block/blk-core.c
index 5d09f8c56024011588ec1b89e90bff2033a150ca..b754a4a2f9bd26e2e3aaed11fb6fa3a9c3d67d0b 100644
@@ -136,7 +136,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
                if (unlikely(nbytes > bio->bi_size)) {
                        printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-                              __FUNCTION__, nbytes, bio->bi_size);
+                              __func__, nbytes, bio->bi_size);
                        nbytes = bio->bi_size;
                }
 
@@ -1566,8 +1566,7 @@ static int __end_that_request_first(struct request *req, int error,
                        if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
                                blk_dump_rq_flags(req, "__end_that");
                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-                                               __FUNCTION__, bio->bi_idx,
-                                               bio->bi_vcnt);
+                                      __func__, bio->bi_idx, bio->bi_vcnt);
                                break;
                        }
 
block/blk-settings.c
index 6089384ab06499becc951478d06dda2e22aa7e00..bb93d4c32775abdc2fdba16c3bc208fb20045f47 100644
@@ -168,8 +168,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_sectors);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_sectors);
        }
 
        if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -196,8 +196,8 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
        if (!max_segments) {
                max_segments = 1;
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_segments);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_segments);
        }
 
        q->max_phys_segments = max_segments;
@@ -220,8 +220,8 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
        if (!max_segments) {
                max_segments = 1;
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_segments);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_segments);
        }
 
        q->max_hw_segments = max_segments;
@@ -241,8 +241,8 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_size);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_size);
        }
 
        q->max_segment_size = max_size;
@@ -357,8 +357,8 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
-               printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
-                                                       mask);
+               printk(KERN_INFO "%s: set to minimum %lx\n",
+                      __func__, mask);
        }
 
        q->seg_boundary_mask = mask;
block/blk-tag.c
index e176ddbe599e23796c004e3d0354cbc4c6b337cf..de64e04299771f08eebb391616d63b72c2fca64a 100644
@@ -112,7 +112,7 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
        if (q && depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
                printk(KERN_ERR "%s: adjusted depth to %d\n",
-                               __FUNCTION__, depth);
+                      __func__, depth);
        }
 
        tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
@@ -296,13 +296,13 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
        if (unlikely(bqt->tag_index[tag] == NULL))
                printk(KERN_ERR "%s: tag %d is missing\n",
-                      __FUNCTION__, tag);
+                      __func__, tag);
 
        bqt->tag_index[tag] = NULL;
 
        if (unlikely(!test_bit(tag, bqt->tag_map))) {
                printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-                      __FUNCTION__, tag);
+                      __func__, tag);
                return;
        }
        /*
@@ -340,7 +340,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
        if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
                printk(KERN_ERR
                       "%s: request %p for device [%s] already tagged %d",
-                      __FUNCTION__, rq,
+                      __func__, rq,
                       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
                BUG();
        }
block/bsg.c
index 23ea4fd1a66d9464b0b3af5dd53f997852f65c1a..fa796b605f5547b546ff41cc4d387496dca424ad 100644
@@ -57,7 +57,7 @@ enum {
 #undef BSG_DEBUG
 
 #ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
 #else
 #define dprintk(fmt, args...)
 #endif
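
Because predefined identifiers are resolved after macro expansion, the converted dprintk() still reports the function containing the call site, not the macro. A minimal sketch assuming BSG_DEBUG is defined (bsg_example and its message are hypothetical):

static void bsg_example(int minor)
{
	dprintk("bound to minor %d\n", minor);
	/*
	 * The preprocessor expands this to:
	 *   printk(KERN_ERR "%s: " "bound to minor %d\n", __func__, minor);
	 * Adjacent string literals are concatenated, and __func__ then
	 * evaluates to "bsg_example" in the scope of the enclosing
	 * function. The GNU ##args extension deletes the preceding
	 * comma when no variadic arguments are passed.
	 */
}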
block/elevator.c
index ac5310ef8270984e32799e0524917abf4246853a..980f8ae147b4c396b15886be25bd9b2342322472 100644
@@ -650,7 +650,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
-                      __FUNCTION__, where);
+                      __func__, where);
                BUG();
        }
 
@@ -808,8 +808,7 @@ struct request *elv_next_request(struct request_queue *q)
                        rq->cmd_flags |= REQ_QUIET;
                        end_queued_request(rq, 0);
                } else {
-                       printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
-                                                               ret);
+                       printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
                }
        }