@@ -1481,3 +1481,170 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	}
 
 	/*
+	 * Check if we can merge with the plugged list before grabbing
+	 * any locks.
+	 */
+	if (attempt_plug_merge(q, bio, &request_count))
+		return;
+
+	spin_lock_irq(q->queue_lock);
+
+	el_ret = elv_merge(q, &req, bio);
+	if (el_ret == ELEVATOR_BACK_MERGE) {
+		if (bio_attempt_back_merge(q, req, bio)) {
+			elv_bio_merged(q, req, bio);
+			if (!attempt_back_merge(q, req))
+				elv_merged_request(q, req, el_ret);
+			goto out_unlock;
+		}
+	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
+		if (bio_attempt_front_merge(q, req, bio)) {
+			elv_bio_merged(q, req, bio);
+			if (!attempt_front_merge(q, req))
+				elv_merged_request(q, req, el_ret);
+			goto out_unlock;
+		}
+	}
+
+get_rq:
+	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_SYNC;
+
+	/*
+	 * Grab a free request. This might sleep but only fails if the
+	 * queue is dead. Returns with the queue unlocked.
+	 */
+	req = get_request(q, rw_flags, bio, GFP_NOIO);
+	if (unlikely(!req)) {
+		bio_endio(bio, -ENODEV);	/* @q is dead */
+		goto out_unlock;
+	}
+
+	/*
+	 * After dropping the lock and possibly sleeping here, our request
+	 * may now be mergeable after it had proven unmergeable (above).
+	 * We don't worry about that case for efficiency. It won't happen
+	 * often, and the elevators are able to handle it.
+	 */
+	init_request_from_bio(req, bio);
+
+	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
+		req->cpu = raw_smp_processor_id();
+
+	plug = current->plug;
+	if (plug) {
+		/*
+		 * If this is the first request added after a plug, fire
+		 * off a plug trace. If others have been added before, check
+		 * if we have multiple devices in this plug. If so, make a
+		 * note to sort the list before dispatch.
+		 */
+		if (list_empty(&plug->list))
+			trace_block_plug(q);
+		else {
+			if (!plug->should_sort) {
+				struct request *__rq;
+
+				__rq = list_entry_rq(plug->list.prev);
+				if (__rq->q != q)
+					plug->should_sort = 1;
+			}
+			if (request_count >= BLK_MAX_REQUEST_COUNT) {
+				blk_flush_plug_list(plug, false);
+				trace_block_plug(q);
+			}
+		}
+		list_add_tail(&req->queuelist, &plug->list);
+		drive_stat_acct(req, 1);
+	} else {
+		spin_lock_irq(q->queue_lock);
+		add_acct_request(q, req, where);
+		__blk_run_queue(q);
+out_unlock:
+		spin_unlock_irq(q->queue_lock);
+	}
+}
+EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */
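
The plug branch above only does something when a caller has wrapped its submissions in a per-task plug. Below is a minimal sketch of that usage, assuming the blk_start_plug()/blk_finish_plug() API and the submit_bio(int rw, struct bio *) signature of this kernel generation; submit_read_batch() is a made-up helper, not part of the patch:

#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical caller: submit a batch of read bios under one plug. */
static void submit_read_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* sets current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* flushes (and sorts, if needed) the plugged list */
}

While the plug is open, blk_queue_bio() appends requests to plug->list instead of taking queue_lock for every bio; the list is flushed when the plug is closed or once BLK_MAX_REQUEST_COUNT requests have piled up.
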
+
+/*
+ * If bio->bi_bdev is a partition, remap the location
+ */
+static inline void blk_partition_remap(struct bio *bio)
+{
+	struct block_device *bdev = bio->bi_bdev;
+
+	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+		struct hd_struct *p = bdev->bd_part;
+
+		bio->bi_sector += p->start_sect;
+		bio->bi_bdev = bdev->bd_contains;
+
+		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
+				      bdev->bd_dev,
+				      bio->bi_sector - p->start_sect);
+	}
+}
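
The remap is simply "partition-relative sector plus partition start", with bi_bdev switched to the whole-disk device; the trace call passes the partition's dev_t and recomputes the pre-remap sector so the tracepoint still reports where the I/O was originally aimed. A worked example with hypothetical numbers:

#include <linux/types.h>

/*
 * Illustration only (made-up values): a bio for sector 100 of a
 * partition that starts at absolute sector 2048 ends up addressing
 * sector 2148 of the underlying disk after blk_partition_remap().
 */
static inline sector_t remap_example(void)
{
	sector_t start_sect = 2048;	/* p->start_sect */
	sector_t bi_sector = 100;	/* partition-relative sector */

	return bi_sector + start_sect;	/* 2148, now relative to bd_contains */
}
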
+
+static void handle_bad_sector(struct bio *bio)
+{
+	char b[BDEVNAME_SIZE];
+
+	printk(KERN_INFO "attempt to access beyond end of device\n");
+	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+	       bdevname(bio->bi_bdev, b),
+	       bio->bi_rw,
+	       (unsigned long long)bio->bi_sector + bio_sectors(bio),
+	       (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
+
+	set_bit(BIO_EOF, &bio->bi_flags);
+}
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+
+static DECLARE_FAULT_ATTR(fail_make_request);
+
+static int __init setup_fail_make_request(char *str)
+{
+	return setup_fault_attr(&fail_make_request, str);
+}
+__setup("fail_make_request=", setup_fail_make_request);
+
+static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
+{
+	return part->make_it_fail && should_fail(&fail_make_request, bytes);
+}
+
+static int __init fail_make_request_debugfs(void)
+{
+	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
+						NULL, &fail_make_request);
+
+	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
+}
+
+late_initcall(fail_make_request_debugfs);
+
+#else /* CONFIG_FAIL_MAKE_REQUEST */
+
+static inline bool should_fail_request(struct hd_struct *part,
+					unsigned int bytes)
+{
+	return false;
+}
+
+#endif /* CONFIG_FAIL_MAKE_REQUEST */
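
should_fail_request() is consulted on the submission path, which is outside this hunk. A rough sketch of how a caller of this era uses it, assuming the part_to_disk()/part0 helpers from <linux/genhd.h>; example_should_fail() is a made-up name:

#include <linux/bio.h>
#include <linux/genhd.h>

/*
 * Sketch only: check both the partition the bio targets and the
 * whole-disk part0, so fault injection can be armed at either level
 * through the per-partition make_it_fail attribute tested above.
 */
static bool example_should_fail(struct bio *bio)
{
	struct hd_struct *part = bio->bi_bdev->bd_part;

	return should_fail_request(part, bio->bi_size) ||
	       should_fail_request(&part_to_disk(part)->part0, bio->bi_size);
}

At runtime the fault attributes registered by fault_create_debugfs_attr() are tuned under debugfs (probability, interval, times, space in the fail_make_request directory), and the same values can be preloaded with the fail_make_request= boot option parsed by setup_fault_attr().
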
+
+/*
+ * Check whether this bio extends beyond the end of the device.
+ */
+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+{
+	sector_t maxsector;
+
+	if (!nr_sectors)
|