@@ -2229,3 +2229,198 @@ struct request *blk_fetch_request(struct request_queue *q)
 	rq = blk_peek_request(q);
 	if (rq)
+		blk_start_request(rq);
+	return rq;
+}
+EXPORT_SYMBOL(blk_fetch_request);
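For context, a driver's ->request_fn would typically drain the queue with blk_fetch_request(), which peeks at the head request and starts it in one step. A minimal sketch, not part of this patch (the function name and the immediate completion are illustrative; queue_lock is held when the block core calls ->request_fn):

static void sketch_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		/* rq is now dequeued and its timeout/accounting started */
		/* ... hand rq to the hardware here ... */
		__blk_end_request_all(rq, 0);	/* sketch only: complete at once */
	}
}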
+
+/**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @req: the request being processed
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete @req
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @req, but doesn't complete
+ *     the request structure even if @req has no leftover bytes.
+ *     If @req does have leftover bytes, sets it up for the next range of
+ *     segments.
+ *
+ *     This special helper function is only for request stacking drivers
+ *     (e.g. request-based dm) so that they can handle partial completion.
+ *     Actual device drivers should use blk_end_request instead.
+ *
+ *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
+ *     a %false return from this function.
+ *
+ * Return:
+ *     %false - this request doesn't have any more data
+ *     %true  - this request has more data
+ **/
+bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+{
+	int total_bytes, bio_nbytes, next_idx = 0;
+	struct bio *bio;
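+	/*
+	 * total_bytes tracks what this call completed across all bios;
+	 * bio_nbytes tracks the completed portion of a partially done
+	 * bio; next_idx is the bvec to resume from within that bio.
+	 */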
+
+	if (!req->bio)
+		return false;
+
+	trace_block_rq_complete(req->q, req);
+
+	/*
+	 * For fs requests, rq is just a carrier of independent bios
+	 * and each partial completion should be handled separately.
+	 * Reset per-request error on each partial completion.
+	 *
+	 * TODO: tj: This is too subtle.  It would be better to let
+	 * low level drivers do what they see fit.
+	 */
+	if (req->cmd_type == REQ_TYPE_FS)
+		req->errors = 0;
+
+	if (error && req->cmd_type == REQ_TYPE_FS &&
+	    !(req->cmd_flags & REQ_QUIET)) {
+		char *error_type;
+
+		switch (error) {
+		case -ENOLINK:
+			error_type = "recoverable transport";
+			break;
+		case -EREMOTEIO:
+			error_type = "critical target";
+			break;
+		case -EBADE:
+			error_type = "critical nexus";
+			break;
+		case -EIO:
+		default:
+			error_type = "I/O";
+			break;
+		}
+		printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
+				   error_type, req->rq_disk ?
+				   req->rq_disk->disk_name : "?",
+				   (unsigned long long)blk_rq_pos(req));
+	}
+
+	blk_account_io_completion(req, nr_bytes);
+
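+	/*
+	 * Walk the bio list: bios fully covered by nr_bytes are ended
+	 * and unlinked from the request; the first bio that is only
+	 * partially covered is accounted bvec by bvec and ends the walk.
+	 */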
+	total_bytes = bio_nbytes = 0;
+	while ((bio = req->bio) != NULL) {
+		int nbytes;
+
+		if (nr_bytes >= bio->bi_size) {
+			req->bio = bio->bi_next;
+			nbytes = bio->bi_size;
+			req_bio_endio(req, bio, nbytes, error);
+			next_idx = 0;
+			bio_nbytes = 0;
+		} else {
+			int idx = bio->bi_idx + next_idx;
+
+			if (unlikely(idx >= bio->bi_vcnt)) {
+				blk_dump_rq_flags(req, "__end_that");
+				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+				       __func__, idx, bio->bi_vcnt);
+				break;
+			}
+
+			nbytes = bio_iovec_idx(bio, idx)->bv_len;
+			BIO_BUG_ON(nbytes > bio->bi_size);
+
+			/*
+			 * not a complete bvec done
+			 */
+			if (unlikely(nbytes > nr_bytes)) {
+				bio_nbytes += nr_bytes;
+				total_bytes += nr_bytes;
+				break;
+			}
+
+			/*
+			 * advance to the next vector
+			 */
+			next_idx++;
+			bio_nbytes += nbytes;
+		}
+
+		total_bytes += nbytes;
+		nr_bytes -= nbytes;
+
+		bio = req->bio;
+		if (bio) {
+			/*
+			 * end more in this run, or just return 'not-done'
+			 */
+			if (unlikely(nr_bytes <= 0))
+				break;
+		}
+	}
+
+	/*
+	 * completely done
+	 */
+	if (!req->bio) {
+		/*
+		 * Reset counters so that the request stacking driver
+		 * can find how many bytes remain in the request
+		 * later.
+		 */
+		req->__data_len = 0;
+		return false;
+	}
+
+	/*
+	 * if the request wasn't completed, update state
+	 */
+	if (bio_nbytes) {
+		req_bio_endio(req, bio, bio_nbytes, error);
+		bio->bi_idx += next_idx;
+		bio_iovec(bio)->bv_offset += nr_bytes;
+		bio_iovec(bio)->bv_len -= nr_bytes;
+	}
+
+	req->__data_len -= total_bytes;
+	req->buffer = bio_data(req->bio);
+
+	/* update sector only for requests with clear definition of sector */
+	if (req->cmd_type == REQ_TYPE_FS)
+		req->__sector += total_bytes >> 9;
+
+	/* mixed attributes always follow the first bio */
+	if (req->cmd_flags & REQ_MIXED_MERGE) {
+		req->cmd_flags &= ~REQ_FAILFAST_MASK;
+		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+	}
+
+	/*
+	 * If the total number of bytes is less than the first segment
+	 * size, something has gone terribly wrong.
+	 */
+	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+		blk_dump_rq_flags(req, "request botched");
+		req->__data_len = blk_rq_cur_bytes(req);
+	}
+
+	/* recalculate the number of segments */
+	blk_recalc_rq_segments(req);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
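For context, the ordinary completion helpers in this file are built on top of blk_update_request(): blk_end_request() amounts to blk_update_request() followed by the local blk_finish_request() teardown under the queue lock. A minimal sketch of that composition, with the bidi path omitted (sketch_end_request is an illustrative name, not part of this patch):

static bool sketch_end_request(struct request *rq, int error,
			       unsigned int nr_bytes)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (blk_update_request(rq, error, nr_bytes))
		return true;	/* partial completion: rq still carries data */

	/* no leftover bytes: retire the request itself under the queue lock */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_finish_request(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return false;
}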
+
+static bool blk_update_bidi_request(struct request *rq, int error,
+				    unsigned int nr_bytes,
+				    unsigned int bidi_bytes)
+{
+	if (blk_update_request(rq, error, nr_bytes))
+		return true;
+
+	/* Bidi request must be completed as a whole */
+	if (unlikely(blk_bidi_rq(rq)) &&
+	    blk_update_request(rq->next_rq, error, bidi_bytes))
+		return true;
+
+	if (blk_queue_add_random(rq->q))
+		add_disk_randomness(rq->rq_disk);
+