@@ -270,3 +270,98 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
 	struct blkcg_gq *blkg;
 
 	rcu_read_lock();
+
+	blkcg = bio_blkcg(bio);
+
+	/* bypass blkg lookup and use @q->root_rl directly for root */
+	if (blkcg == &blkcg_root)
+		goto root_rl;
+
+	/*
+	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
+	 * or if either the blkcg or queue is going away.  Fall back to
+	 * root_rl in such cases.
+	 */
+	blkg = blkg_lookup_create(blkcg, q);
+	if (unlikely(IS_ERR(blkg)))
+		goto root_rl;
+
+	blkg_get(blkg);
+	rcu_read_unlock();
+	return &blkg->rl;
+root_rl:
+	rcu_read_unlock();
+	return &q->root_rl;
+}
+
+/**
+ * blk_put_rl - put request_list
+ * @rl: request_list to put
+ *
+ * Put the reference acquired by blk_get_rl().  Should be called under
+ * queue_lock.
+ */
+static inline void blk_put_rl(struct request_list *rl)
+{
+	/* root_rl may not have blkg set */
+	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
+		blkg_put(rl->blkg);
+}
+
+/**
+ * blk_rq_set_rl - associate a request with a request_list
+ * @rq: request of interest
+ * @rl: target request_list
+ *
+ * Associate @rq with @rl so that accounting and freeing can know the
+ * request_list @rq came from.
+ */
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
+{
+	rq->rl = rl;
+}
+
+/**
+ * blk_rq_rl - return the request_list a request came from
+ * @rq: request of interest
+ *
+ * Return the request_list @rq is allocated from.
+ */
+static inline struct request_list *blk_rq_rl(struct request *rq)
+{
+	return rq->rl;
+}
+
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+					 struct request_queue *q);
+/**
+ * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
+ *
+ * Should be used under queue_lock.
+ */
+#define blk_queue_for_each_rl(rl, q)	\
+	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
+
+/**
+ * blkg_stat_add - add a value to a blkg_stat
+ * @stat: target blkg_stat
+ * @val: value to add
+ *
+ * Add @val to @stat.  The caller is responsible for synchronizing calls to
+ * this function.
+ */
+static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
+{
+	u64_stats_update_begin(&stat->syncp);
+	stat->cnt += val;
+	u64_stats_update_end(&stat->syncp);
+}
+
+/**
+ * blkg_stat_read - read the current value of a blkg_stat
+ * @stat: blkg_stat to read
+ *
+ * Read the current value of @stat.  This function can be called without
+ * synchronization and takes care of u64 atomicity.
+ */
+static inline uint64_t blkg_stat_read(struct blkg_stat *stat)