@@ -178,3 +178,93 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 		 * flush data request completion path.  Restore @rq for
 		 * normal completion and end it.
 		 */
+		BUG_ON(!list_empty(&rq->queuelist));
+		list_del_init(&rq->flush.list);
+		blk_flush_restore_request(rq);
+		__blk_end_request_all(rq, error);
+		break;
+
+	default:
+		BUG();
+	}
+
+	return blk_kick_flush(q) | queued;
+}
+
+static void flush_end_io(struct request *flush_rq, int error)
+{
+	struct request_queue *q = flush_rq->q;
+	struct list_head *running = &q->flush_queue[q->flush_running_idx];
+	bool queued = false;
+	struct request *rq, *n;
+
+	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
+
+	/* account completion of the flush request */
+	q->flush_running_idx ^= 1;
+	elv_completed_request(q, flush_rq);
+
+	/* and push the waiting requests to the next stage */
+	list_for_each_entry_safe(rq, n, running, flush.list) {
+		unsigned int seq = blk_flush_cur_seq(rq);
+
+		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
+		queued |= blk_flush_complete_seq(rq, seq, error);
+	}
+
+	/*
+	 * Kick the queue to avoid stalls in two cases:
+	 * 1. Moving a request silently to an empty queue_head may stall
+	 * the queue.
+	 * 2. When a flush request is running on a non-queueable queue, the
+	 * queue is held.  Restart the queue after the flush request has
+	 * finished to avoid a stall.
+	 * This function is called from the request completion path, and
+	 * calling directly into request_fn may confuse the driver.  Always
+	 * use kblockd.
+	 */
+	if (queued || q->flush_queue_delayed)
+		blk_run_queue_async(q);
+	q->flush_queue_delayed = 0;
+}
+
+/**
+ * blk_kick_flush - consider issuing flush request
+ * @q: request_queue being kicked
+ *
+ * Flush-related states of @q have changed; consider issuing a flush request.
+ * Please read the comment at the top of this file for more info.
+ *
+ * CONTEXT:
+ * spin_lock_irq(q->queue_lock)
+ *
+ * RETURNS:
+ * %true if flush was issued, %false otherwise.
+ */
+static bool blk_kick_flush(struct request_queue *q)
+{
+	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
+	struct request *first_rq =
+		list_first_entry(pending, struct request, flush.list);
+
+	/* C1 described at the top of this file */
+	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
+		return false;
+
+	/* C2 and C3 */
+	if (!list_empty(&q->flush_data_in_flight) &&
+	    time_before(jiffies,
+			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
+		return false;
+
+	/*
+	 * Issue flush and toggle pending_idx.  This makes pending_idx
+	 * different from running_idx, which means flush is in flight.
+	 */
+	blk_rq_init(q, &q->flush_rq);
+	q->flush_rq.cmd_type = REQ_TYPE_FS;
+	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+	q->flush_rq.rq_disk = first_rq->rq_disk;
+	q->flush_rq.end_io = flush_end_io;
+
+	q->flush_pending_idx ^= 1;
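
To see why the bare `^= 1` toggles in flush_end_io() and blk_kick_flush() are enough, it helps to strip the mechanism down: the two flush queues are distinguished only by the pending/running index bits, a flush is in flight exactly while the bits differ, and toggling a bit hands one queue to the hardware while the other keeps collecting new requests. The following standalone userspace sketch models just that index discipline; all toy_* names are made up for illustration, and plain counters stand in for the kernel's request lists, so this shows the invariant, not the kernel API.

/*
 * Illustrative sketch (not kernel code) of the double-buffered index
 * scheme used by the patch above.  Two buffers are told apart only by
 * an index bit; XOR-toggling the bit hands one buffer to the in-flight
 * flush while the other keeps accumulating requests.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	int flush_queue[2];		/* request count per buffer */
	unsigned int pending_idx;
	unsigned int running_idx;
};

/* Queue a request: it always lands on the pending buffer. */
static void toy_queue_request(struct toy_queue *q)
{
	q->flush_queue[q->pending_idx]++;
}

/* Mirror of blk_kick_flush(): issue only if no flush is in flight. */
static bool toy_kick_flush(struct toy_queue *q)
{
	if (q->pending_idx != q->running_idx ||		/* flush already running */
	    q->flush_queue[q->pending_idx] == 0)	/* nothing pending */
		return false;

	/* Toggle pending_idx: pending != running now marks "in flight". */
	q->pending_idx ^= 1;
	return true;
}

/* Mirror of flush_end_io(): retire the running buffer. */
static void toy_flush_done(struct toy_queue *q)
{
	q->flush_queue[q->running_idx] = 0;	/* requests advance a stage */
	q->running_idx ^= 1;			/* indices match again */
}

int main(void)
{
	struct toy_queue q = { {0, 0}, 0, 0 };

	toy_queue_request(&q);
	printf("issued: %d\n", toy_kick_flush(&q));	/* 1: flush goes out */
	toy_queue_request(&q);				/* lands on other buffer */
	printf("issued: %d\n", toy_kick_flush(&q));	/* 0: one already running */
	toy_flush_done(&q);
	printf("issued: %d\n", toy_kick_flush(&q));	/* 1: second batch issues */
	return 0;
}

Run as-is this prints issued: 1 / 0 / 1, mirroring the invariant the BUG_ON()s in the patch assert: at most one flush is ever in flight, while new requests keep queueing on the other buffer.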
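
The C2/C3 hold-back in blk_kick_flush() also leans on time_before(), the kernel's wraparound-safe jiffies comparison. Below is a minimal sketch of that idiom under hypothetical values (the toy_* name and the counts are invented); it reproduces the signed-difference trick so the "still inside the pending window" test stays correct even after the tick counter wraps.

/*
 * Illustrative sketch of the wraparound-safe time comparison behind
 * the C2/C3 hold-back.  toy_time_before() mirrors the time_before()
 * idiom: compare via signed difference so the result is correct
 * across counter wraparound.
 */
#include <stdbool.h>
#include <stdio.h>

static bool toy_time_before(unsigned long a, unsigned long b)
{
	/* true iff a is earlier than b, even if the counter wrapped */
	return (long)(a - b) < 0;
}

int main(void)
{
	unsigned long pending_since = (unsigned long)-3;	/* near wraparound */
	unsigned long timeout = 5;				/* hypothetical ticks */
	unsigned long now = 1;					/* counter has wrapped */

	/* deadline pending_since + timeout wraps to 2 */
	printf("%d\n", toy_time_before(now, pending_since + timeout));	/* 1: hold back */

	now = 4;	/* past the wrapped deadline */
	printf("%d\n", toy_time_before(now, pending_since + timeout));	/* 0: issue flush */
	return 0;
}

A naive `now < deadline` comparison would get the first case wrong once jiffies wraps, which is exactly why the patch gates the flush on time_before() rather than a plain less-than.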