@@ -2105,3 +2105,65 @@ struct request *blk_peek_request(struct request_queue *q)
 		if (rq->cmd_flags & REQ_DONTPREP)
 			break;
+
+		if (q->dma_drain_size && blk_rq_bytes(rq)) {
+			/*
+			 * make sure space for the drain appears we
+			 * know we can do this because max_hw_segments
+			 * has been adjusted to be one fewer than the
+			 * device can handle
+			 */
+			rq->nr_phys_segments++;
+		}
+
+		if (!q->prep_rq_fn)
+			break;
+
+		ret = q->prep_rq_fn(q, rq);
+		if (ret == BLKPREP_OK) {
+			break;
+		} else if (ret == BLKPREP_DEFER) {
+			/*
+			 * the request may have been (partially) prepped.
+			 * we need to keep this request in the front to
+			 * avoid resource deadlock. REQ_STARTED will
+			 * prevent other fs requests from passing this one.
+			 */
+			if (q->dma_drain_size && blk_rq_bytes(rq) &&
+			    !(rq->cmd_flags & REQ_DONTPREP)) {
+				/*
+				 * remove the space for the drain we added
+				 * so that we don't add it again
+				 */
+				--rq->nr_phys_segments;
+			}
+
+			rq = NULL;
+			break;
+		} else if (ret == BLKPREP_KILL) {
+			rq->cmd_flags |= REQ_QUIET;
+			/*
+			 * Mark this request as started so we don't trigger
+			 * any debug logic in the end I/O path.
+			 */
+			blk_start_request(rq);
+			__blk_end_request_all(rq, -EIO);
+		} else {
+			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+			break;
+		}
+	}
+
+	return rq;
+}
+EXPORT_SYMBOL(blk_peek_request);
+
+void blk_dequeue_request(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	BUG_ON(list_empty(&rq->queuelist));
+	BUG_ON(ELV_ON_HASH(rq));
+
+	list_del_init(&rq->queuelist);
+