@@ -233,3 +233,103 @@ EXPORT_SYMBOL(blk_delay_queue);
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
+ *
+ * Description:
+ * blk_start_queue() will clear the stop flag on the queue, and call
+ * the request_fn for the queue if it was in a stopped state when
+ * entered. Also see blk_stop_queue(). Queue lock must be held.
+ **/
+void blk_start_queue(struct request_queue *q)
+{
+	WARN_ON(!irqs_disabled());
+
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+	__blk_run_queue(q);
+}
+EXPORT_SYMBOL(blk_start_queue);
+
+/**
+ * blk_stop_queue - stop a queue
+ * @q: The &struct request_queue in question
+ *
+ * Description:
+ * The Linux block layer assumes that a block driver will consume all
+ * entries on the request queue when the request_fn strategy is called.
+ * Often this will not happen, because of hardware limitations (queue
+ * depth settings). If a device driver gets a 'queue full' response,
+ * or if it simply chooses not to queue more I/O at one point, it can
+ * call this function to prevent the request_fn from being called until
+ * the driver has signalled it's ready to go again. This happens by calling
+ * blk_start_queue() to restart queue operations. Queue lock must be held.
+ **/
+void blk_stop_queue(struct request_queue *q)
+{
+	cancel_delayed_work(&q->delay_work);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
+}
+EXPORT_SYMBOL(blk_stop_queue);
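/*
 * Editor's sketch, not part of this patch: one way a single-queue driver
 * might pair blk_stop_queue() and blk_start_queue() as described above.
 * my_device, my_hw_submit(), my_request_fn() and my_complete() are
 * hypothetical names. The block layer invokes the request_fn with
 * q->queue_lock held, which satisfies the locking rules stated above.
 */
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct my_device {
	struct request_queue *queue;
};

/* Hypothetical: hand @rq to the hardware; returns -EBUSY when the HW queue is full. */
int my_hw_submit(struct request *rq);

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (my_hw_submit(rq) == -EBUSY) {
			/* Hardware queue full: put the request back and stop the queue. */
			blk_requeue_request(q, rq);
			blk_stop_queue(q);
			break;
		}
	}
}

/* Completion path, e.g. run from the driver's interrupt handler. */
static void my_complete(struct my_device *dev, struct request *rq, int error)
{
	unsigned long flags;

	spin_lock_irqsave(dev->queue->queue_lock, flags);
	__blk_end_request_all(rq, error);
	/* Hardware has room again: restart what blk_stop_queue() stopped. */
	blk_start_queue(dev->queue);
	spin_unlock_irqrestore(dev->queue->queue_lock, flags);
}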
+
+/**
+ * blk_sync_queue - cancel any pending callbacks on a queue
+ * @q: the queue
+ *
+ * Description:
+ * The block layer may perform asynchronous callback activity
+ * on a queue, such as calling the unplug function after a timeout.
+ * A block device may call blk_sync_queue to ensure that any
+ * such activity is cancelled, thus allowing it to release resources
+ * that the callbacks might use. The caller must already have made sure
+ * that its ->make_request_fn will not re-add plugging prior to calling
+ * this function.
+ *
+ * This function does not cancel any asynchronous activity arising
+ * out of elevator or throttling code. That would require elevator_exit()
+ * and blkcg_exit_queue() to be called with queue lock initialized.
+ *
+ */
+void blk_sync_queue(struct request_queue *q)
+{
+	del_timer_sync(&q->timeout);
+	cancel_delayed_work_sync(&q->delay_work);
+}
+EXPORT_SYMBOL(blk_sync_queue);
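/*
 * Editor's sketch, not part of this patch: the quiesce order implied by the
 * blk_sync_queue() comment. my_quiesce_queue() is a hypothetical helper;
 * blk_stop_queue() needs the queue lock held, while blk_sync_queue() must be
 * called without it, since it waits for the timeout timer and delay work,
 * both of which take the queue lock themselves.
 */
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void my_quiesce_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);		/* request_fn will not be invoked again */
	spin_unlock_irqrestore(q->queue_lock, flags);

	blk_sync_queue(q);		/* cancels q->timeout and q->delay_work */

	/* The caller may now release resources those callbacks might use. */
}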
+
+/**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q: The queue to run
+ *
+ * Description:
+ * Invoke request handling on a queue if there are any pending requests.
+ * May be used to restart request handling after a request has completed.
+ * This variant runs the queue whether or not the queue has been
+ * stopped. Must be called with the queue lock held and interrupts
+ * disabled. See also @blk_run_queue.
+ */
+inline void __blk_run_queue_uncond(struct request_queue *q)
+{
+	if (unlikely(blk_queue_dead(q)))
+		return;
+
+	/*
+	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
+	 * the queue lock internally. As a result multiple threads may be
+	 * running such a request function concurrently. Keep track of the
+	 * number of active request_fn invocations such that blk_drain_queue()
+	 * can wait until all these request_fn calls have finished.
+	 */
+	q->request_fn_active++;
+	q->request_fn(q);
+	q->request_fn_active--;
+}
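/*
 * Editor's illustration, not part of this patch: how a drain-style helper
 * could use the request_fn_active counter maintained above to wait for every
 * concurrent request_fn invocation to return. The real waiting is done by
 * blk_drain_queue() in this file; my_wait_for_request_fn() is a hypothetical,
 * simplified stand-in.
 */
#include <linux/blkdev.h>
#include <linux/delay.h>

static void my_wait_for_request_fn(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	while (q->request_fn_active) {
		/* Drop the lock so in-flight request_fn calls can finish. */
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
	}
	spin_unlock_irq(q->queue_lock);
}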
+
+/**
+ * __blk_run_queue - run a single device queue
+ * @q: The queue to run
+ *
+ * Description:
+ * See @blk_run_queue. This variant must be called with the queue lock
+ * held and interrupts disabled.
+ */
+void __blk_run_queue(struct request_queue *q)
+{
+	if (unlikely(blk_queue_stopped(q)))