@@ -333,3 +333,105 @@ inline void __blk_run_queue_uncond(struct request_queue *q)
 void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
+		return;
+
+	__blk_run_queue_uncond(q);
+}
+EXPORT_SYMBOL(__blk_run_queue);
+
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q:	The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on our
+ *    behalf. The caller must hold the queue lock.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
+		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+EXPORT_SYMBOL(blk_run_queue_async);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on this queue, if it has pending work to do.
+ *    May be used to restart queueing when a request has completed.
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_run_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_run_queue);
+
+void blk_put_queue(struct request_queue *q)
+{
+	kobject_put(&q->kobj);
+}
+EXPORT_SYMBOL(blk_put_queue);
+
+/**
+ * __blk_drain_queue - drain requests from request_queue
+ * @q: queue to drain
+ * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
+ *
+ * Drain requests from @q.  If @drain_all is set, all requests are drained.
+ * If not, only ELVPRIV requests are drained.  The caller is responsible
+ * for ensuring that no new requests which need to be drained are queued.
+ */
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+	__releases(q->queue_lock)
+	__acquires(q->queue_lock)
+{
+	int i;
+
+	lockdep_assert_held(q->queue_lock);
+
+	while (true) {
+		bool drain = false;
+
+		/*
+		 * The caller might be trying to drain @q before its
+		 * elevator is initialized.
+		 */
+		if (q->elevator)
+			elv_drain_elevator(q);
+
+		blkcg_drain_queue(q);
+
+		/*
+		 * This function might be called on a queue which failed
+		 * driver init after queue creation or is not yet fully
+		 * active.  Some drivers (e.g. fd and loop) get unhappy
+		 * in such cases.  Kick the queue iff the dispatch queue
+		 * has something on it and @q has request_fn set.
+		 */
+		if (!list_empty(&q->queue_head) && q->request_fn)
+			__blk_run_queue(q);
+
+		drain |= q->nr_rqs_elvpriv;
+		drain |= q->request_fn_active;
+
+		/*
+		 * Unfortunately, requests are queued at and tracked from
+		 * multiple places and there's no single counter which can
+		 * be drained.  Check all the queues and counters.
+		 */
+		if (drain_all) {
+			drain |= !list_empty(&q->queue_head);
+			for (i = 0; i < 2; i++) {
+				drain |= q->nr_rqs[i];
+				drain |= q->in_flight[i];
+				drain |= !list_empty(&q->flush_queue[i]);
+			}
+		}
+
+		if (!drain)
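
The run-queue helpers in this hunk differ mainly in their locking contract: blk_run_queue() takes q->queue_lock itself, while __blk_run_queue() and blk_run_queue_async() must be called with the lock already held, and the async variant punts the actual dispatch to kblockd. As a rough illustration only, here is a minimal sketch of how a legacy request_fn-style driver might use them; the my_dev structure and the my_dev_* function names are hypothetical, and only the blk_* calls and the locking rules come from the code shown above.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical driver state; not part of the patch above. */
struct my_dev {
	struct request_queue *queue;
};

/*
 * Completion path, e.g. from an interrupt handler: the queue lock is
 * held here, so only the lock-held variants may be used.  The async
 * variant hands the work to kblockd instead of invoking the driver's
 * request_fn recursively from this context.
 */
static void my_dev_complete(struct my_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(dev->queue->queue_lock, flags);
	blk_run_queue_async(dev->queue);
	spin_unlock_irqrestore(dev->queue->queue_lock, flags);
}

/*
 * Process context with no locks held: blk_run_queue() acquires and
 * releases q->queue_lock around __blk_run_queue() on our behalf.
 */
static void my_dev_kick(struct my_dev *dev)
{
	blk_run_queue(dev->queue);
}

The async variant exists so that dispatch can be restarted from contexts where re-entering request_fn directly would be unsafe or would deepen the stack: per the code above, mod_delayed_work() with a zero delay simply schedules q->delay_work on kblockd_workqueue, which then performs the equivalent of blk_run_queue() as soon as the workqueue runs.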