@@ -2965,3 +2965,112 @@ struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
 	/* Not currently on the callback list */
 	BUG_ON(size < sizeof(*cb));
 	cb = kzalloc(size, GFP_ATOMIC);
+	if (cb) {
+		cb->data = data;
+		cb->callback = unplug;
+		list_add(&cb->list, &plug->cb_list);
+	}
+	return cb;
+}
+EXPORT_SYMBOL(blk_check_plugged);
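How a driver is expected to consume the new export: the sketch below models the pattern blk_check_plugged() enables, where a caller embeds struct blk_plug_cb as the first member of its own structure and defers work until the plug is flushed. The my_plug_cb/my_unplug/my_submit names are hypothetical stand-ins (md uses the same shape); only blk_check_plugged() and the blk_plug_cb layout come from this patch.

	/* Hypothetical caller-side structure; blk_plug_cb must be the first
	 * member so the callback can recover it with container_of(). */
	struct my_plug_cb {
		struct blk_plug_cb cb;
		struct bio_list pending;	/* zeroed by kzalloc() == empty list */
	};

	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
	{
		struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);
		struct bio *bio;

		/* submit everything batched while the task was plugged */
		while ((bio = bio_list_pop(&mcb->pending)))
			generic_make_request(bio);
		kfree(mcb);	/* the callback owns the allocation */
	}

	static void my_submit(struct bio *bio, void *data)
	{
		struct blk_plug_cb *cb;
		struct my_plug_cb *mcb;

		cb = blk_check_plugged(my_unplug, data, sizeof(*mcb));
		if (cb) {
			/* plugged: batch the bio, my_unplug() runs at flush time */
			mcb = container_of(cb, struct my_plug_cb, cb);
			bio_list_add(&mcb->pending, bio);
		} else {
			/* no plug in effect (or allocation failed): submit now */
			generic_make_request(bio);
		}
	}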
+
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+{
+	struct request_queue *q;
+	unsigned long flags;
+	struct request *rq;
+	LIST_HEAD(list);
+	unsigned int depth;
+
+	BUG_ON(plug->magic != PLUG_MAGIC);
+
+	flush_plug_callbacks(plug, from_schedule);
+	if (list_empty(&plug->list))
+		return;
+
+	list_splice_init(&plug->list, &list);
+
+	if (plug->should_sort) {
+		list_sort(NULL, &list, plug_rq_cmp);
+		plug->should_sort = 0;
+	}
+
+	q = NULL;
+	depth = 0;
+
+	/*
+	 * Save and disable interrupts here, to avoid doing it for every
+	 * queue lock we have to take.
+	 */
+	local_irq_save(flags);
+	while (!list_empty(&list)) {
+		rq = list_entry_rq(list.next);
+		list_del_init(&rq->queuelist);
+		BUG_ON(!rq->q);
+		if (rq->q != q) {
+			/*
+			 * This drops the queue lock
+			 */
+			if (q)
+				queue_unplugged(q, depth, from_schedule);
+			q = rq->q;
+			depth = 0;
+			spin_lock(q->queue_lock);
+		}
+
+		/*
+		 * Short-circuit if @q is dead
+		 */
+		if (unlikely(blk_queue_dying(q))) {
+			__blk_end_request_all(rq, -ENODEV);
+			continue;
+		}
+
+		/*
+		 * rq is already accounted, so use raw insert
+		 */
+		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
+			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
+		else
+			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
+
+		depth++;
+	}
+
+	/*
+	 * This drops the queue lock
+	 */
+	if (q)
+		queue_unplugged(q, depth, from_schedule);
+
+	local_irq_restore(flags);
+}
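For context on the from_schedule flag (the caller lives in blkdev.h/scheduler code, not in this hunk): when a task with a non-empty plug is about to sleep, the scheduler flushes the plug on its behalf so batched I/O is not stalled for the length of the sleep. A sketch of that path as it looks in kernels of this vintage:

	/* Sketch of the scheduler-side caller (cf. blk_schedule_flush_plug()
	 * in include/linux/blkdev.h), invoked just before a task blocks. */
	static inline void blk_schedule_flush_plug(struct task_struct *tsk)
	{
		struct blk_plug *plug = tsk->plug;

		if (plug)
			blk_flush_plug_list(plug, true);	/* from_schedule == true */
	}

Passing true keeps the flush off the direct submission path: queue_unplugged() then kicks the queue asynchronously via kblockd rather than running it synchronously from inside the scheduler.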
+
+void blk_finish_plug(struct blk_plug *plug)
+{
+	blk_flush_plug_list(plug, false);
+
+	if (plug == current->plug)
+		current->plug = NULL;
+}
+EXPORT_SYMBOL(blk_finish_plug);
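The normal lifecycle pairs blk_finish_plug() with blk_start_plug() (defined earlier in this file). A minimal, hypothetical submitter showing the intended batching pattern:

	/* Hypothetical helper: batch a set of bios under one plug. */
	void submit_batch(struct bio **bios, int nr)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);	/* subsequent requests queue on current->plug */
		for (i = 0; i < nr; i++)
			generic_make_request(bios[i]);
		blk_finish_plug(&plug);	/* flush requests and callbacks, unhook the plug */
	}

The plug == current->plug check above matters for nesting: blk_start_plug() only installs the outermost plug on the task, so an inner start/finish pair flushes its own list without clearing current->plug for the outer one.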
+
+int __init blk_dev_init(void)
+{
+	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+			sizeof(((struct request *)0)->cmd_flags));
+
+	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
+	kblockd_workqueue = alloc_workqueue("kblockd",
+					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+	if (!kblockd_workqueue)
+		panic("Failed to create kblockd\n");
+
+	request_cachep = kmem_cache_create("blkdev_requests",
+			sizeof(struct request), 0, SLAB_PANIC, NULL);
+
+	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
+			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+
+	return 0;
+}