@@ -2668,3 +2668,102 @@ EXPORT_SYMBOL(__blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  */
+bool __blk_end_request_cur(struct request *rq, int error)
+{
+	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
+}
+EXPORT_SYMBOL(__blk_end_request_cur);
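For orientation only (not part of the patch), here is a minimal sketch of how a simple PIO-style driver could consume a request one chunk at a time with __blk_end_request_cur(); my_request_fn() and my_do_chunk() are hypothetical names, and the queue lock is assumed to be held throughout, as the completion helper requires.

#include <linux/blkdev.h>

/* Hypothetical transfer of the current chunk; returns 0 or a negative errno. */
static int my_do_chunk(struct request *rq)
{
	return 0;	/* pretend blk_rq_cur_bytes(rq) bytes were transferred */
}

/* Sketch: called with q->queue_lock held, hence the __blk_* completion variant. */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq = blk_fetch_request(q);

	while (rq) {
		int err = my_do_chunk(rq);

		/* %false: request fully completed, move on to the next one */
		if (!__blk_end_request_cur(rq, err))
			rq = blk_fetch_request(q);
	}
}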
+
+/**
+ * __blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.  Must be called
+ *     with queue lock held.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool __blk_end_request_err(struct request *rq, int error)
+{
+	WARN_ON(error >= 0);
+	return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(__blk_end_request_err);
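Purely as a hedged illustration: a driver error path holding the queue lock could use __blk_end_request_err() to retire everything up to the next failure boundary and then retry whatever is left; my_handle_error() and my_retry() are invented placeholders, not an API this patch defines.

#include <linux/blkdev.h>

/* Hypothetical driver-specific retry of the not-yet-completed part of @rq. */
static void my_retry(struct request_queue *q, struct request *rq);

/*
 * Sketch: on an I/O error, finish the failfast-capable part of @rq up to the
 * next failure boundary; if buffers are still pending, hand the remainder to
 * the driver's retry logic.  Caller holds q->queue_lock.
 */
static void my_handle_error(struct request_queue *q, struct request *rq)
{
	if (__blk_end_request_err(rq, -EIO))
		my_retry(q, rq);
}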
+
+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+		     struct bio *bio)
+{
+	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
+	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
+
+	if (bio_has_data(bio)) {
+		rq->nr_phys_segments = bio_phys_segments(q, bio);
+		rq->buffer = bio_data(bio);
+	}
+	rq->__data_len = bio->bi_size;
+	rq->bio = rq->biotail = bio;
+
+	if (bio->bi_bdev)
+		rq->rq_disk = bio->bi_bdev->bd_disk;
+}
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+/**
+ * rq_flush_dcache_pages - Helper function to flush all pages in a request
+ * @rq: the request to be flushed
+ *
+ * Description:
+ *     Flush all pages in @rq.
+ */
+void rq_flush_dcache_pages(struct request *rq)
+{
+	struct req_iterator iter;
+	struct bio_vec *bvec;
+
+	rq_for_each_segment(bvec, rq, iter)
+		flush_dcache_page(bvec->bv_page);
+}
+EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
+#endif
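A hedged usage sketch (not from the patch): a memory-backed driver that fills a READ request's pages by CPU copy and flushes the D-cache before completing it. my_copy_to_request() is an invented helper; on architectures that do not set ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, rq_flush_dcache_pages() falls back to an empty inline in the header, so the call is harmless everywhere.

#include <linux/blkdev.h>

/* Hypothetical helper that copies the device's data into rq's pages. */
static void my_copy_to_request(struct request *rq);

/*
 * Sketch: complete a READ serviced by CPU copies.  The D-cache flush makes
 * the freshly written data visible through other mappings of the pages on
 * architectures that need it.  Called with the queue lock held.
 */
static void my_complete_read(struct request *rq)
{
	my_copy_to_request(rq);
	rq_flush_dcache_pages(rq);
	__blk_end_request_all(rq, 0);
}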
+
+/**
+ * blk_lld_busy - Check if underlying low-level drivers of a device are busy
+ * @q: the queue of the device being checked
+ *
+ * Description:
+ *     Check if the underlying low-level drivers of a device are busy.
+ *     Drivers that want to export their busy state must first register
+ *     their own exporting function with blk_queue_lld_busy().
+ *
+ *     This function is used only by request stacking drivers to stop
+ *     dispatching requests to underlying devices while those devices are
+ *     busy.  Doing so allows more I/O merging on the stacking driver's
+ *     queue and avoids I/O throughput regressions under bursty I/O load.
+ *
+ * Return:
+ *     0 - Not busy (the request stacking driver should dispatch the request)
+ *     1 - Busy    (the request stacking driver should stop dispatching)
+ */
+int blk_lld_busy(struct request_queue *q)
+{
+	if (q->lld_busy_fn)
+		return q->lld_busy_fn(q);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(blk_lld_busy);
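As the kernel-doc notes, the busy state propagates only if the driver registered an exporting callback. Below is a hedged sketch of how a hypothetical request stacking driver might register its own callback with blk_queue_lld_busy() and poll the underlying queue; every my_* name is invented for illustration.

#include <linux/blkdev.h>

struct my_stacked_dev {			/* hypothetical per-device data */
	struct request_queue *lower_q;	/* queue of the underlying device */
};

/* Report this queue as busy while the lower device cannot take more work. */
static int my_lld_busy(struct request_queue *q)
{
	struct my_stacked_dev *d = q->queuedata;

	return blk_lld_busy(d->lower_q);
}

/* Export the busy state so stacking drivers above this one can see it too. */
static void my_init_queue(struct request_queue *q, struct my_stacked_dev *d)
{
	q->queuedata = d;
	blk_queue_lld_busy(q, my_lld_busy);
}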
+
+/**
+ * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
+ * @rq: the clone request to be cleaned up
+ *
+ * Description:
+ *     Free all bios in @rq for a cloned request.
+ */
+void blk_rq_unprep_clone(struct request *rq)
+{