@@ -2767,3 +2767,201 @@ EXPORT_SYMBOL_GPL(blk_lld_busy);
  */
 void blk_rq_unprep_clone(struct request *rq)
 {
+	struct bio *bio;
+
+	while ((bio = rq->bio) != NULL) {
+		rq->bio = bio->bi_next;
+
+		bio_put(bio);
+	}
+}
+EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
+
+/*
+ * Copy attributes of the original request to the clone request.
+ * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
+ */
+static void __blk_rq_prep_clone(struct request *dst, struct request *src)
+{
+	dst->cpu = src->cpu;
+	dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
+	dst->cmd_type = src->cmd_type;
+	dst->__sector = blk_rq_pos(src);
+	dst->__data_len = blk_rq_bytes(src);
+	dst->nr_phys_segments = src->nr_phys_segments;
+	dst->ioprio = src->ioprio;
+	dst->extra_len = src->extra_len;
+}
+
+/**
+ * blk_rq_prep_clone - Helper function to setup clone request
+ * @rq: the request to be setup
+ * @rq_src: original request to be cloned
+ * @bs: bio_set that bios for clone are allocated from
+ * @gfp_mask: memory allocation mask for bio
+ * @bio_ctr: setup function to be called for each clone bio.
+ *           Returns %0 for success, non %0 for failure.
+ * @data: private data to be passed to @bio_ctr
+ *
+ * Description:
+ *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
+ *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
+ *     are not copied, and copying such parts is the caller's responsibility.
+ *     Also, pages which the original bios are pointing to are not copied
+ *     and the cloned bios just point to the same pages.
+ *     So cloned bios must be completed before original bios, which means
+ *     the caller must complete @rq before @rq_src.
+ */
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+		      struct bio_set *bs, gfp_t gfp_mask,
+		      int (*bio_ctr)(struct bio *, struct bio *, void *),
+		      void *data)
+{
+	struct bio *bio, *bio_src;
+
+	if (!bs)
+		bs = fs_bio_set;
+
+	blk_rq_init(NULL, rq);
+
+	__rq_for_each_bio(bio_src, rq_src) {
+		bio = bio_clone_bioset(bio_src, gfp_mask, bs);
+		if (!bio)
+			goto free_and_out;
+
+		if (bio_ctr && bio_ctr(bio, bio_src, data))
+			goto free_and_out;
+
+		if (rq->bio) {
+			rq->biotail->bi_next = bio;
+			rq->biotail = bio;
+		} else
+			rq->bio = rq->biotail = bio;
+	}
+
+	__blk_rq_prep_clone(rq, rq_src);
+
+	return 0;
+
+free_and_out:
+	if (bio)
+		bio_put(bio);
+	blk_rq_unprep_clone(rq);
+
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
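
As a usage illustration (not part of the patch), here is a minimal sketch of how a request-based stacking driver might pair blk_rq_prep_clone() with blk_rq_unprep_clone(). The my_* names, the driver-private bio_set and the command-copy helper are hypothetical; only the two exported helpers above are taken from the code being added.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical per-clone context handed to the bio constructor. */
struct my_clone_info {
	void *driver_data;
};

/* Called once per cloned bio; a real driver would set bi_end_io here too. */
static int my_bio_ctr(struct bio *clone, struct bio *orig, void *data)
{
	struct my_clone_info *info = data;

	clone->bi_private = info->driver_data;
	return 0;
}

/* Hypothetical: copy the command payload; stubbed out for the sketch. */
static int my_copy_command(struct request *dst, struct request *src)
{
	return 0;
}

static int my_setup_clone(struct request *clone, struct request *orig,
			  struct bio_set *my_bs, struct my_clone_info *info)
{
	int ret;

	ret = blk_rq_prep_clone(clone, orig, my_bs, GFP_ATOMIC,
				my_bio_ctr, info);
	if (ret)
		return ret;

	/*
	 * ->cmd/->buffer/->sense are not copied by blk_rq_prep_clone();
	 * copying whatever payload the lower device needs is up to us.
	 * If that later setup fails, drop the cloned bios explicitly.
	 */
	if (my_copy_command(clone, orig)) {
		blk_rq_unprep_clone(clone);
		return -ENOMEM;
	}

	return 0;
}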
+
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
+{
+	return queue_work(kblockd_workqueue, work);
+}
+EXPORT_SYMBOL(kblockd_schedule_work);
+
+int kblockd_schedule_delayed_work(struct request_queue *q,
+		struct delayed_work *dwork, unsigned long delay)
+{
+	return queue_delayed_work(kblockd_workqueue, dwork, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
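
For illustration only: a sketch of how a block driver might defer queue kicking to kblockd via the two exported helpers above. The my_* structure and handlers are hypothetical, and the sketch assumes the work items were set up with INIT_WORK()/INIT_DELAYED_WORK() during driver initialization.

#include <linux/blkdev.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_dev {
	struct request_queue	*queue;
	struct work_struct	kick_work;	/* INIT_WORK() at setup */
	struct delayed_work	retry_work;	/* INIT_DELAYED_WORK() at setup */
};

static void my_kick_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, kick_work);

	/* Runs in kblockd's process context, so the queue can be run safely. */
	blk_run_queue(dev->queue);
}

/* Kick the queue soon, without recursing from the current (atomic) context. */
static void my_defer_kick(struct my_dev *dev)
{
	kblockd_schedule_work(dev->queue, &dev->kick_work);
}

/* Retry after ~100ms, e.g. when a transient resource shortage was hit. */
static void my_defer_retry(struct my_dev *dev)
{
	kblockd_schedule_delayed_work(dev->queue, &dev->retry_work,
				      msecs_to_jiffies(100));
}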
+
+#define PLUG_MAGIC	0x91827364
+
+/**
+ * blk_start_plug - initialize blk_plug and track it inside the task_struct
+ * @plug: The &struct blk_plug that needs to be initialized
+ *
+ * Description:
+ *   Tracking blk_plug inside the task_struct will help with auto-flushing the
+ *   pending I/O should the task end up blocking between blk_start_plug() and
+ *   blk_finish_plug(). This is important from a performance perspective, but
+ *   also ensures that we don't deadlock. For instance, if the task is blocking
+ *   for a memory allocation, memory reclaim could end up wanting to free a
+ *   page belonging to that request that is currently residing in our private
+ *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
+ *   this kind of deadlock.
+ */
+void blk_start_plug(struct blk_plug *plug)
+{
+	struct task_struct *tsk = current;
+
+	plug->magic = PLUG_MAGIC;
+	INIT_LIST_HEAD(&plug->list);
+	INIT_LIST_HEAD(&plug->cb_list);
+	plug->should_sort = 0;
+
+	/*
+	 * If this is a nested plug, don't actually assign it. It will be
+	 * flushed on its own.
+	 */
+	if (!tsk->plug) {
+		/*
+		 * Store ordering should not be needed here, since a potential
+		 * preempt will imply a full memory barrier
+		 */
+		tsk->plug = plug;
+	}
+}
+EXPORT_SYMBOL(blk_start_plug);
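
As a usage illustration (not part of the patch): the on-stack plug pattern the kerneldoc above describes. The batching helper and its parameters are hypothetical; blk_start_plug(), blk_finish_plug() and submit_bio() are the real interfaces.

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/fs.h>

/*
 * Submit a batch of already-prepared bios. Everything queued between
 * blk_start_plug() and blk_finish_plug() is held in the on-stack plug and
 * dispatched together; if the task sleeps in between, the scheduler flushes
 * the plug automatically, which is the deadlock-avoidance point made above.
 */
static void my_submit_batch(int rw, struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(rw, bios[i]);
	blk_finish_plug(&plug);
}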
+
+static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct request *rqa = container_of(a, struct request, queuelist);
+	struct request *rqb = container_of(b, struct request, queuelist);
+
+	return !(rqa->q < rqb->q ||
+		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
+}
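
plug_rq_cmp() is a 0/1 comparator in the form list_sort() expects; the plug flush path typically sorts the plugged requests with it, grouping them by queue and ordering them by start sector before dispatch. A minimal sketch of that pairing, assuming it lives in the same file so the static comparator is visible (the my_* name is hypothetical):

#include <linux/list.h>
#include <linux/list_sort.h>

/* Order plugged requests by queue, then by start sector, before dispatch. */
static void my_sort_plug_list(struct list_head *list)
{
	list_sort(NULL, list, plug_rq_cmp);
}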
+
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental large
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+			    bool from_schedule)
+	__releases(q->queue_lock)
+{
+	trace_block_unplug(q, depth, !from_schedule);
+
+	if (from_schedule)
+		blk_run_queue_async(q);
+	else
+		__blk_run_queue(q);
+	spin_unlock(q->queue_lock);
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
+{
+	LIST_HEAD(callbacks);
+
+	while (!list_empty(&plug->cb_list)) {
+		list_splice_init(&plug->cb_list, &callbacks);
+
+		while (!list_empty(&callbacks)) {
+			struct blk_plug_cb *cb = list_first_entry(&callbacks,
+							  struct blk_plug_cb,
+							  list);
+			list_del(&cb->list);
+			cb->callback(cb, from_schedule);
+		}
+	}
+}
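
For illustration (not part of the patch): how a stacking driver can hook into this flush path through blk_check_plugged(), which is added just below. The my_* names are hypothetical, and the sketch assumes blk_check_plugged() fills in ->callback/->data and links the allocation onto plug->cb_list as in the mainline helper; the callback is then expected to free that allocation.

#include <linux/blkdev.h>
#include <linux/slab.h>

struct my_dev {
	int dummy;	/* stand-in for real driver state */
};

/*
 * blk_plug_cb must be the first member, since blk_check_plugged() allocates
 * 'size' bytes and treats the start of the buffer as a struct blk_plug_cb.
 */
struct my_plug_cb {
	struct blk_plug_cb	cb;
	struct my_dev		*dev;
};

/* Runs from flush_plug_callbacks() when the task's plug is flushed. */
static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

	/* Dispatch whatever was batched for mcb->dev here. */
	kfree(mcb);	/* the callback owns the blk_check_plugged() allocation */
}

static void my_queue_io(struct my_dev *dev)
{
	struct blk_plug_cb *cb;

	cb = blk_check_plugged(my_unplug, dev, sizeof(struct my_plug_cb));
	if (cb) {
		struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

		mcb->dev = dev;	/* IO will be kicked from my_unplug() on flush */
	} else {
		/* No plug active (or allocation failed): dispatch immediately. */
	}
}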
+
+struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
+				      int size)
+{
+	struct blk_plug *plug = current->plug;
+	struct blk_plug_cb *cb;
+
+	if (!plug)
+		return NULL;
+
+	list_for_each_entry(cb, &plug->cb_list, list)
+		if (cb->callback == unplug && cb->data == data)
+			return cb;
+
+	/* Not currently on the callback list */
+	BUG_ON(size < sizeof(*cb));
+	cb = kzalloc(size, GFP_ATOMIC);