@@ -386,3 +386,155 @@ static void blk_account_io_merge(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
+		part = req->part;
+
+		part_round_stats(cpu, part);
+		part_dec_in_flight(part, rq_data_dir(req));
+
+		hd_struct_put(part);
+		part_stat_unlock();
+	}
+}
+
+/*
+ * Has to be called with the request spinlock acquired
+ */
+static int attempt_merge(struct request_queue *q, struct request *req,
+			 struct request *next)
+{
+	if (!rq_mergeable(req) || !rq_mergeable(next))
+		return 0;
+
+	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
+		return 0;
+
+	/*
+	 * not contiguous
+	 */
+	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
+		return 0;
+
+	if (rq_data_dir(req) != rq_data_dir(next)
+	    || req->rq_disk != next->rq_disk
+	    || next->special)
+		return 0;
+
+	if (req->cmd_flags & REQ_WRITE_SAME &&
+	    !blk_write_same_mergeable(req->bio, next->bio))
+		return 0;
+
+	/*
+	 * If we are allowed to merge, then append bio list
+	 * from next to rq and release next. merge_requests_fn
+	 * will have updated segment counts, update sector
+	 * counts here.
+	 */
+	if (!ll_merge_requests_fn(q, req, next))
+		return 0;
+
+	/*
+	 * If failfast settings disagree or any of the two is already
+	 * a mixed merge, mark both as mixed before proceeding.  This
+	 * makes sure that all involved bios have mixable attributes
+	 * set properly.
+	 */
+	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
+	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
+	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
+		blk_rq_set_mixed_merge(req);
+		blk_rq_set_mixed_merge(next);
+	}
+
+	/*
+	 * At this point we have either done a back merge
+	 * or front merge. We need the smaller start_time of
+	 * the merged requests to be the current request
+	 * for accounting purposes.
+	 */
+	if (time_after(req->start_time, next->start_time))
+		req->start_time = next->start_time;
+
+	req->biotail->bi_next = next->bio;
+	req->biotail = next->biotail;
+
+	req->__data_len += blk_rq_bytes(next);
+
+	elv_merge_requests(q, req, next);
+
+	/*
+	 * 'next' is going away, so update stats accordingly
+	 */
+	blk_account_io_merge(next);
+
+	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+	if (blk_rq_cpu_valid(next))
+		req->cpu = next->cpu;
+
+	/* owner-ship of bio passed from next to req */
+	next->bio = NULL;
+	__blk_put_request(q, next);
+	return 1;
+}
+
+int attempt_back_merge(struct request_queue *q, struct request *rq)
+{
+	struct request *next = elv_latter_request(q, rq);
+
+	if (next)
+		return attempt_merge(q, rq, next);
+
+	return 0;
+}
+
+int attempt_front_merge(struct request_queue *q, struct request *rq)
+{
+	struct request *prev = elv_former_request(q, rq);
+
+	if (prev)
+		return attempt_merge(q, prev, rq);
+
+	return 0;
+}
+
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+			  struct request *next)
+{
+	return attempt_merge(q, rq, next);
+}
+
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+	if (!rq_mergeable(rq) || !bio_mergeable(bio))
+		return false;
+
+	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
+		return false;
+
+	/* different data direction or already started, don't merge */
+	if (bio_data_dir(bio) != rq_data_dir(rq))
+		return false;
+
+	/* must be same device and not a special request */
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return false;
+
+	/* only merge integrity protected bio into ditto rq */
+	if (bio_integrity(bio) != blk_integrity_rq(rq))
+		return false;
+
+	/* must be using the same buffer */
+	if (rq->cmd_flags & REQ_WRITE_SAME &&
+	    !blk_write_same_mergeable(rq->bio, bio))
+		return false;
+
+	return true;
+}
+
+int blk_try_merge(struct request *rq, struct bio *bio)
+{
+	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+		return ELEVATOR_BACK_MERGE;
+	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+		return ELEVATOR_FRONT_MERGE;
+	return ELEVATOR_NO_MERGE;
+}
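
A minimal standalone sketch of the sector arithmetic that drives the merge decisions in the hunk above: attempt_merge() only merges when `next` begins exactly where `req` ends, and blk_try_merge() picks a back or front merge for a bio the same way. This is not kernel code; the mock types and the try_merge() helper are hypothetical stand-ins, compilable on their own.

/*
 * Illustration only -- simplified model of blk_try_merge()'s decision.
 * Mock structs replace struct request and struct bio.
 */
#include <stdio.h>

struct mock_rq  { unsigned long long pos; unsigned int sectors; };
struct mock_bio { unsigned long long sector; unsigned int sectors; };

enum { NO_MERGE, BACK_MERGE, FRONT_MERGE };

static int try_merge(const struct mock_rq *rq, const struct mock_bio *bio)
{
	/* bio starts right where the request ends: append to the tail */
	if (rq->pos + rq->sectors == bio->sector)
		return BACK_MERGE;
	/* bio ends right where the request starts: prepend to the head */
	if (rq->pos - bio->sectors == bio->sector)
		return FRONT_MERGE;
	return NO_MERGE;
}

int main(void)
{
	struct mock_rq  rq  = { .pos = 100, .sectors = 8 };
	struct mock_bio bio = { .sector = 108, .sectors = 8 };

	/* prints 1 (BACK_MERGE): the bio is contiguous with the request's tail */
	printf("merge decision: %d\n", try_merge(&rq, &bio));
	return 0;
}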