|
@@ -311,3 +311,42 @@ void __blkg_release(struct blkcg_gq *blkg)
|
|
|
* request queue links.
|
|
|
*
|
|
|
 * Having a reference to blkg under an rcu allows access to only
|
|
|
+ * values local to groups like group stats and group rate limits
|
|
|
+ */
|
|
|
+ call_rcu(&blkg->rcu_head, blkg_rcu_free);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(__blkg_release);
|
|
|
+
|
|
|
+/*
|
|
|
+ * The next function used by blk_queue_for_each_rl(). It's a bit tricky
|
|
|
+ * because the root blkg uses @q->root_rl instead of its own rl.
|
|
|
+ */
|
|
|
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
|
|
|
+ struct request_queue *q)
|
|
|
+{
|
|
|
+ struct list_head *ent;
|
|
|
+ struct blkcg_gq *blkg;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Determine the current blkg list_head. The first entry is
|
|
|
+ * root_rl which is off @q->blkg_list and mapped to the head.
|
|
|
+ */
|
|
|
+ if (rl == &q->root_rl) {
|
|
|
+ ent = &q->blkg_list;
|
|
|
+ /* There are no more block groups, hence no request lists */
|
|
|
+ if (list_empty(ent))
|
|
|
+ return NULL;
|
|
|
+ } else {
|
|
|
+ blkg = container_of(rl, struct blkcg_gq, rl);
|
|
|
+ ent = &blkg->q_node;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* walk to the next list_head, skip root blkcg */
|
|
|
+ ent = ent->next;
|
|
|
+ if (ent == &q->root_blkg->q_node)
|
|
|
+ ent = ent->next;
|
|
|
+ if (ent == &q->blkg_list)
|
|
|
+ return NULL;
|
|
|
+
|
|
|
+ blkg = container_of(ent, struct blkcg_gq, q_node);
|
|
|
+ return &blkg->rl;
|