@@ -1648,3 +1648,121 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 	sector_t maxsector;
 
 	if (!nr_sectors)
+		return 0;
+
+	/* Test device or partition size, when known. */
+	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
+	if (maxsector) {
+		sector_t sector = bio->bi_sector;
+
+		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+			/*
+			 * This may well happen - the kernel calls bread()
+			 * without checking the size of the device, e.g., when
+			 * mounting a device.
+			 */
+			handle_bad_sector(bio);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static noinline_for_stack bool
+generic_make_request_checks(struct bio *bio)
+{
+	struct request_queue *q;
+	int nr_sectors = bio_sectors(bio);
+	int err = -EIO;
+	char b[BDEVNAME_SIZE];
+	struct hd_struct *part;
+
+	might_sleep();
+
+	if (bio_check_eod(bio, nr_sectors))
+		goto end_io;
+
+	q = bdev_get_queue(bio->bi_bdev);
+	if (unlikely(!q)) {
+		printk(KERN_ERR
+		       "generic_make_request: Trying to access "
+		       "nonexistent block-device %s (%Lu)\n",
+		       bdevname(bio->bi_bdev, b),
+		       (long long) bio->bi_sector);
+		goto end_io;
+	}
+
+	if (likely(bio_is_rw(bio) &&
+		   nr_sectors > queue_max_hw_sectors(q))) {
+		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+		       bdevname(bio->bi_bdev, b),
+		       bio_sectors(bio),
+		       queue_max_hw_sectors(q));
+		goto end_io;
+	}
+
+	part = bio->bi_bdev->bd_part;
+	if (should_fail_request(part, bio->bi_size) ||
+	    should_fail_request(&part_to_disk(part)->part0,
+				bio->bi_size))
+		goto end_io;
+
+	/*
+	 * If this device has partitions, remap block n
+	 * of partition p to block n+start(p) of the disk.
+	 */
+	blk_partition_remap(bio);
+
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+		goto end_io;
+
+	if (bio_check_eod(bio, nr_sectors))
+		goto end_io;
+
+	/*
+	 * Filter flush bio's early so that make_request based
+	 * drivers without flush support don't have to worry
+	 * about them.
+	 */
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+		if (!nr_sectors) {
+			err = 0;
+			goto end_io;
+		}
+	}
+
+	if ((bio->bi_rw & REQ_DISCARD) &&
+	    (!blk_queue_discard(q) ||
+	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
+
+	if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
+
+	/*
+	 * Various block parts want %current->io_context and lazy ioc
+	 * allocation ends up trading a lot of pain for a small amount of
+	 * memory.  Just allocate it upfront.  This may fail and block
+	 * layer knows how to live with it.
+	 */
+	create_io_context(GFP_ATOMIC, q->node);
+
+	if (blk_throtl_bio(q, bio))
+		return false;	/* throttled, will be resubmitted later */
+
+	trace_block_bio_queue(q, bio);
+	return true;
+
+end_io:
+	bio_endio(bio, err);
+	return false;
+}
+
+/**
+ * generic_make_request - hand a buffer to its device driver for I/O
|