|
@@ -1867,3 +1867,86 @@ void submit_bio(int rw, struct bio *bio)
|
|
|
if (rw & WRITE) {
|
|
|
count_vm_events(PGPGOUT, count);
|
|
|
} else {
|
|
|
+ task_io_account_read(bio->bi_size);
|
|
|
+ count_vm_events(PGPGIN, count);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (unlikely(block_dump)) {
|
|
|
+ char b[BDEVNAME_SIZE];
|
|
|
+ printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
|
|
|
+ current->comm, task_pid_nr(current),
|
|
|
+ (rw & WRITE) ? "WRITE" : "READ",
|
|
|
+ (unsigned long long)bio->bi_sector,
|
|
|
+ bdevname(bio->bi_bdev, b),
|
|
|
+ count);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ generic_make_request(bio);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL(submit_bio);
|
|
|
+
|
|
|
+/**
|
|
|
+ * blk_rq_check_limits - Helper function to check a request for the queue limit
|
|
|
+ * @q: the queue
|
|
|
+ * @rq: the request being checked
|
|
|
+ *
|
|
|
+ * Description:
|
|
|
+ * @rq may have been made based on weaker limitations of upper-level queues
|
|
|
+ * in request stacking drivers, and it may violate the limitation of @q.
|
|
|
+ * Since the block layer and the underlying device driver trust @rq
|
|
|
+ * after it is inserted to @q, it should be checked against @q before
|
|
|
+ * the insertion using this generic function.
|
|
|
+ *
|
|
|
+ * This function should also be useful for request stacking drivers
|
|
|
+ * in some cases below, so export this function.
|
|
|
+ * Request stacking drivers like request-based dm may change the queue
|
|
|
+ * limits while requests are in the queue (e.g. dm's table swapping).
|
|
|
+ * Such request stacking drivers should check those requests against
|
|
|
+ * the new queue limits again when they dispatch those requests,
|
|
|
+ * although such checks are also done against the old queue limits
|
|
|
+ * when submitting requests.
|
|
|
+ */
|
|
|
+int blk_rq_check_limits(struct request_queue *q, struct request *rq)
|
|
|
+{
|
|
|
+ if (!rq_mergeable(rq))
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
|
|
|
+ printk(KERN_ERR "%s: over max size limit.\n", __func__);
|
|
|
+ return -EIO;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * queue's settings related to segment counting like q->bounce_pfn
|
|
|
+ * may differ from that of other stacking queues.
|
|
|
+ * Recalculate it to check the request correctly on this queue's
|
|
|
+ * limitation.
|
|
|
+ */
|
|
|
+ blk_recalc_rq_segments(rq);
|
|
|
+ if (rq->nr_phys_segments > queue_max_segments(q)) {
|
|
|
+ printk(KERN_ERR "%s: over max segments limit.\n", __func__);
|
|
|
+ return -EIO;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(blk_rq_check_limits);
|
|
|
+
|
|
|
+/**
|
|
|
+ * blk_insert_cloned_request - Helper for stacking drivers to submit a request
|
|
|
+ * @q: the queue to submit the request
|
|
|
+ * @rq: the request being queued
|
|
|
+ */
|
|
|
+int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
|
|
|
+{
|
|
|
+ unsigned long flags;
|
|
|
+ int where = ELEVATOR_INSERT_BACK;
|
|
|
+
|
|
|
+ if (blk_rq_check_limits(q, rq))
|
|
|
+ return -EIO;
|
|
|
+
|
|
|
+ if (rq->rq_disk &&
|
|
|
+ should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
|
|
|
+ return -EIO;
|
|
|
+
|