Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "A small collection of fixes for the current kernel.  This contains:

   - Two error handling fixes from Jan Kara.  One for null_blk on
     failure to add a device, and the other for block/scsi_ioctl,
     fixing up the error jump point in SCSI_IOCTL_SEND_COMMAND.

   - A commit added in the merge window for the bio integrity bits
     unfortunately disabled merging for all requests if
     CONFIG_BLK_DEV_INTEGRITY wasn't set.  Reverse the logic, so that
     integrity checking won't disallow merges when not enabled.

   - A fix from Ming Lei for merging and generating too many segments.
     This caused a BUG in virtio_blk.

   - Two error handling printk() fixups from Robert Elliott, improving
     the information given when we rate limit.

   - Error handling fixup on elevator_init() failure from Sudip
     Mukherjee.

   - A fix from Tony Battersby, fixing up a memory leak in the
     scatterlist handling with scsi-mq"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: Fix merge logic when CONFIG_BLK_DEV_INTEGRITY is not defined
  lib/scatterlist: fix memory leak with scsi-mq
  block: fix wrong error return in elevator_init()
  scsi: Fix error handling in SCSI_IOCTL_SEND_COMMAND
  null_blk: Cleanup error recovery in null_add_dev()
  blk-merge: recaculate segment if it isn't less than max segments
  fs: clarify rate limit suppressed buffer I/O errors
  fs: merge I/O error prints into one line
commit d506aa68c2 (Linus Torvalds, 2014-10-29 11:57:10 -07:00)
7 changed files with 28 additions and 46 deletions

block/blk-merge.c

@@ -99,16 +99,17 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
         bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                         &q->queue_flags);
+        bool merge_not_need = bio->bi_vcnt < queue_max_segments(q);

         if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
-                        bio->bi_vcnt < queue_max_segments(q))
+                        merge_not_need)
                 bio->bi_phys_segments = bio->bi_vcnt;
         else {
                 struct bio *nxt = bio->bi_next;

                 bio->bi_next = NULL;
                 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
-                                no_sg_merge);
+                                no_sg_merge && merge_not_need);
                 bio->bi_next = nxt;
         }
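
For readability, this is roughly how blk_recount_segments() ends up once the hunk is applied, reconstructed from the diff above (anything outside the hunk is elided): the bi_vcnt shortcut is only trusted while the bio stays under the queue's segment limit, and the recalculation also stops taking the no-SG-merge shortcut once that limit is reached.

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                        &q->queue_flags);
        /* only trust bi_vcnt while the bio is below the queue's limit */
        bool merge_not_need = bio->bi_vcnt < queue_max_segments(q);

        if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) && merge_not_need)
                bio->bi_phys_segments = bio->bi_vcnt;
        else {
                struct bio *nxt = bio->bi_next;

                bio->bi_next = NULL;
                /* at or over the limit, do a real recount so a driver
                 * (e.g. virtio_blk) never sees more segments than it
                 * advertised */
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
                                no_sg_merge && merge_not_need);
                bio->bi_next = nxt;
        }
        /* remainder of the function is outside this hunk */
}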

block/elevator.c

@@ -229,7 +229,9 @@ int elevator_init(struct request_queue *q, char *name)
         }

         err = e->ops.elevator_init_fn(q, e);
-        return 0;
+        if (err)
+                elevator_put(e);
+        return err;
 }
 EXPORT_SYMBOL(elevator_init);
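
The shape of the fix, as a plain sketch of the function's tail reconstructed from the hunk: propagate the init callback's error instead of unconditionally returning 0, and drop the elevator type reference so it is not leaked on failure.

        err = e->ops.elevator_init_fn(q, e);
        if (err)
                elevator_put(e);        /* release the elevator type reference on failure */
        return err;                     /* previously an unconditional "return 0" */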

block/scsi_ioctl.c

@@ -508,7 +508,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,

         if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
                 err = DRIVER_ERROR << 24;
-                goto out;
+                goto error;
         }

         memset(sense, 0, sizeof(sense));
@@ -517,7 +517,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,

         blk_execute_rq(q, disk, rq, 0);

-out:
         err = rq->errors & 0xff;        /* only 8 bit SCSI status */
         if (err) {
                 if (rq->sense_len && rq->sense) {
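
The two hunks above move the early blk_rq_map_kern() failure off the completion path. A minimal, self-contained sketch of the control-flow change (hypothetical names and labels, not the kernel code):

/* On an early setup failure, jump to the cleanup label instead of the
 * completion path, so status is never read from a request that was
 * never executed. */
static int send_command(int map_fails)
{
        int err = 0;

        if (map_fails) {
                err = -1;
                goto error;     /* the old code fell into the status path below */
        }

        /* ... execute the request ... */

        /* completion path: only meaningful after the request actually ran */
        return err;

error:
        /* cleanup path: release resources and report the setup error */
        return err;
}

int main(void)
{
        return send_command(1) ? 1 : 0;
}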

drivers/block/null_blk.c

@@ -450,14 +450,10 @@ static int init_driver_queues(struct nullb *nullb)

                 ret = setup_commands(nq);
                 if (ret)
-                        goto err_queue;
+                        return ret;
                 nullb->nr_queues++;
         }
-
         return 0;
-err_queue:
-        cleanup_queues(nullb);
-        return ret;
 }

 static int null_add_dev(void)
@@ -507,7 +503,9 @@ static int null_add_dev(void)
                         goto out_cleanup_queues;
                 }
                 blk_queue_make_request(nullb->q, null_queue_bio);
-                init_driver_queues(nullb);
+                rv = init_driver_queues(nullb);
+                if (rv)
+                        goto out_cleanup_blk_queue;
         } else {
                 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
                 if (!nullb->q) {
@@ -516,7 +514,9 @@ static int null_add_dev(void)
                 }
                 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                 blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
-                init_driver_queues(nullb);
+                rv = init_driver_queues(nullb);
+                if (rv)
+                        goto out_cleanup_blk_queue;
         }

         nullb->q->queuedata = nullb;
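
Taken together, the three hunks give init_driver_queues() a plain return-on-error contract and make null_add_dev() act on it. Reconstructed from the diff, the resulting pattern is roughly:

        /* init_driver_queues() now simply returns the error ... */
        ret = setup_commands(nq);
        if (ret)
                return ret;     /* no more local err_queue/cleanup_queues label */

        /* ... and both branches of null_add_dev() check it and unwind
         * through a teardown label defined elsewhere in the function
         * (not shown in these hunks) */
        rv = init_driver_queues(nullb);
        if (rv)
                goto out_cleanup_blk_queue;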

fs/buffer.c

@@ -128,21 +128,15 @@ __clear_page_buffers(struct page *page)
         page_cache_release(page);
 }

-
-static int quiet_error(struct buffer_head *bh)
-{
-        if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
-                return 0;
-        return 1;
-}
-
-
-static void buffer_io_error(struct buffer_head *bh)
+static void buffer_io_error(struct buffer_head *bh, char *msg)
 {
         char b[BDEVNAME_SIZE];
-        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
+
+        if (!test_bit(BH_Quiet, &bh->b_state))
+                printk_ratelimited(KERN_ERR
+                        "Buffer I/O error on dev %s, logical block %llu%s\n",
                         bdevname(bh->b_bdev, b),
-                        (unsigned long long)bh->b_blocknr);
+                        (unsigned long long)bh->b_blocknr, msg);
 }

 /*
@@ -177,17 +171,10 @@ EXPORT_SYMBOL(end_buffer_read_sync);

 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
-        char b[BDEVNAME_SIZE];
-
         if (uptodate) {
                 set_buffer_uptodate(bh);
         } else {
-                if (!quiet_error(bh)) {
-                        buffer_io_error(bh);
-                        printk(KERN_WARNING "lost page write due to "
-                                            "I/O error on %s\n",
-                               bdevname(bh->b_bdev, b));
-                }
+                buffer_io_error(bh, ", lost sync page write");
                 set_buffer_write_io_error(bh);
                 clear_buffer_uptodate(bh);
         }
@@ -304,8 +291,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
                 set_buffer_uptodate(bh);
         } else {
                 clear_buffer_uptodate(bh);
-                if (!quiet_error(bh))
-                        buffer_io_error(bh);
+                buffer_io_error(bh, ", async page read");
                 SetPageError(page);
         }
@@ -353,7 +339,6 @@ still_busy:
  */
 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
-        char b[BDEVNAME_SIZE];
         unsigned long flags;
         struct buffer_head *first;
         struct buffer_head *tmp;
@@ -365,12 +350,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
         if (uptodate) {
                 set_buffer_uptodate(bh);
         } else {
-                if (!quiet_error(bh)) {
-                        buffer_io_error(bh);
-                        printk(KERN_WARNING "lost page write due to "
-                                            "I/O error on %s\n",
-                               bdevname(bh->b_bdev, b));
-                }
+                buffer_io_error(bh, ", lost async page write");
                 set_bit(AS_EIO, &page->mapping->flags);
                 set_buffer_write_io_error(bh);
                 clear_buffer_uptodate(bh);
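
After these hunks, all of the buffer I/O error reporting funnels through one helper. Reconstructed from the first hunk, it reads roughly as below; callers pass the context as a suffix string, e.g. buffer_io_error(bh, ", lost async page write").

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
        char b[BDEVNAME_SIZE];

        /* BH_Quiet still suppresses the message entirely; otherwise the
         * printk is rate limited, and the caller-supplied msg says what
         * was lost, so the single line carries all the information */
        if (!test_bit(BH_Quiet, &bh->b_state))
                printk_ratelimited(KERN_ERR
                        "Buffer I/O error on dev %s, logical block %llu%s\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr, msg);
}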

include/linux/blkdev.h

@@ -1583,13 +1583,13 @@ static inline bool blk_integrity_merge_rq(struct request_queue *rq,
                                            struct request *r1,
                                            struct request *r2)
 {
-        return 0;
+        return true;
 }
 static inline bool blk_integrity_merge_bio(struct request_queue *rq,
                                            struct request *r,
                                            struct bio *b)
 {
-        return 0;
+        return true;
 }
 static inline bool blk_integrity_is_initialized(struct gendisk *g)
 {
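
These are the !CONFIG_BLK_DEV_INTEGRITY stubs. The merge path consults them and rejects a merge whenever they return false, so the old "return 0" quietly disabled merging on kernels built without integrity support. An illustrative caller pattern (an assumption for clarity, not the exact kernel call site):

        /* illustrative only: the merge decision consults the integrity helper */
        if (!blk_integrity_merge_bio(q, req, bio))
                return false;   /* with the old stubs this rejected every merge */
        /* with the fixed stubs returning true, non-integrity kernels fall
         * through here and merging proceeds as before */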

lib/scatterlist.c

@@ -203,10 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                 }

                 table->orig_nents -= sg_size;
-                if (!skip_first_chunk) {
-                        free_fn(sgl, alloc_size);
+                if (skip_first_chunk)
                         skip_first_chunk = false;
-                }
+                else
+                        free_fn(sgl, alloc_size);

                 sgl = next;
         }
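
The leak is visible from the loop logic alone: in the old code, when skip_first_chunk started out true the branch body never ran, so the flag was never cleared and no chunk after the first was ever freed. A sketch of old versus new behaviour, assuming the same loop shape as above:

        /* old: once skip_first_chunk == true, nothing is freed, because the
         * flag is only cleared inside the branch that is being skipped */
        if (!skip_first_chunk) {
                free_fn(sgl, alloc_size);
                skip_first_chunk = false;
        }

        /* new: the first chunk is skipped exactly once and every later
         * chunk is freed, so chained scatterlists no longer leak with scsi-mq */
        if (skip_first_chunk)
                skip_first_chunk = false;
        else
                free_fn(sgl, alloc_size);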