@@ -804,3 +804,166 @@ int blkcg_activate_policy(struct request_queue *q,
 	if (preloaded)
 		radix_tree_preload_end();
 
+	if (IS_ERR(blkg)) {
+		ret = PTR_ERR(blkg);
+		goto out_unlock;
+	}
+	q->root_blkg = blkg;
+	q->root_rl.blkg = blkg;
+
+	list_for_each_entry(blkg, &q->blkg_list, q_node)
+		cnt++;
+
+	spin_unlock_irq(q->queue_lock);
+
+	/* allocate policy_data for all existing blkgs */
+	while (cnt--) {
+		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+		if (!pd) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+		list_add_tail(&pd->alloc_node, &pds);
+	}
+
+	/*
+	 * Install the allocated pds. With @q bypassing, no new blkg
+	 * should have been created while the queue lock was dropped.
+	 */
+	spin_lock_irq(q->queue_lock);
+
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		if (WARN_ON(list_empty(&pds))) {
+			/* umm... this shouldn't happen, just abort */
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
+		list_del_init(&pd->alloc_node);
+
+		/* grab blkcg lock too while installing @pd on @blkg */
+		spin_lock(&blkg->blkcg->lock);
+
+		blkg->pd[pol->plid] = pd;
+		pd->blkg = blkg;
+		pol->pd_init_fn(blkg);
+
+		spin_unlock(&blkg->blkcg->lock);
+	}
+
+	__set_bit(pol->plid, q->blkcg_pols);
+	ret = 0;
+out_unlock:
+	spin_unlock_irq(q->queue_lock);
+out_free:
+	blk_queue_bypass_end(q);
+	list_for_each_entry_safe(pd, n, &pds, alloc_node)
+		kfree(pd);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blkcg_activate_policy);
+
+/**
+ * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to deactivate
+ *
+ * Deactivate @pol on @q. Follows the same synchronization rules as
+ * blkcg_activate_policy().
+ */
+void blkcg_deactivate_policy(struct request_queue *q,
+			     const struct blkcg_policy *pol)
+{
+	struct blkcg_gq *blkg;
+
+	if (!blkcg_policy_enabled(q, pol))
+		return;
+
+	blk_queue_bypass_start(q);
+	spin_lock_irq(q->queue_lock);
+
+	__clear_bit(pol->plid, q->blkcg_pols);
+
+	/* if no policy is left, no need for blkgs - shoot them down */
+	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
+		blkg_destroy_all(q);
+
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		/* grab blkcg lock too while removing @pd from @blkg */
+		spin_lock(&blkg->blkcg->lock);
+
+		if (pol->pd_exit_fn)
+			pol->pd_exit_fn(blkg);
+
+		kfree(blkg->pd[pol->plid]);
+		blkg->pd[pol->plid] = NULL;
+
+		spin_unlock(&blkg->blkcg->lock);
+	}
+
+	spin_unlock_irq(q->queue_lock);
+	blk_queue_bypass_end(q);
+}
+EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
+
+/**
+ * blkcg_policy_register - register a blkcg policy
+ * @pol: blkcg policy to register
+ *
+ * Register @pol with blkcg core. Might sleep and @pol may be modified on
+ * successful registration. Returns 0 on success and -errno on failure.
+ */
+int blkcg_policy_register(struct blkcg_policy *pol)
+{
+	int i, ret;
+
+	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
+		return -EINVAL;
+
+	mutex_lock(&blkcg_pol_mutex);
+
+	/* find an empty slot */
+	ret = -ENOSPC;
+	for (i = 0; i < BLKCG_MAX_POLS; i++)
+		if (!blkcg_policy[i])
+			break;
+	if (i >= BLKCG_MAX_POLS)
+		goto out_unlock;
+
+	/* register and update blkgs */
+	pol->plid = i;
+	blkcg_policy[i] = pol;
+
+	/* everything is in place, add intf files for the new policy */
+	if (pol->cftypes)
+		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
+	ret = 0;
+out_unlock:
+	mutex_unlock(&blkcg_pol_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blkcg_policy_register);
+
+/**
+ * blkcg_policy_unregister - unregister a blkcg policy
+ * @pol: blkcg policy to unregister
+ *
+ * Undo blkcg_policy_register(@pol). Might sleep.
+ */
+void blkcg_policy_unregister(struct blkcg_policy *pol)
+{
+	mutex_lock(&blkcg_pol_mutex);
+
+	if (WARN_ON(blkcg_policy[pol->plid] != pol))
+		goto out_unlock;
+
+	/* kill the intf files first */
+	if (pol->cftypes)
+		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+
+	/* unregister and update blkgs */
+	blkcg_policy[pol->plid] = NULL;
+out_unlock:
+	mutex_unlock(&blkcg_pol_mutex);
+}
+EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
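
For context, a minimal sketch of how a policy would plug into the interfaces added above. Everything named "foo" here is made up for illustration and is not part of the patch; the sketch only assumes the struct blkcg_policy fields the hunk itself references (pd_size, plid, pd_init_fn, pd_exit_fn, cftypes) and the blkcg_* entry points it exports. A policy embeds struct blkg_policy_data in its per-blkg data, registers the policy once at init, and activates it per request_queue.

	/* hypothetical example only -- not part of this patch */
	#include <linux/init.h>
	#include "blk-cgroup.h"

	static struct blkcg_policy foo_policy;

	/*
	 * Per-blkg private data.  struct blkg_policy_data must be embedded
	 * and pd_size must cover it (see the check in blkcg_policy_register()).
	 */
	struct foo_grp {
		struct blkg_policy_data pd;
		u64 nr_foos;
	};

	static void foo_pd_init(struct blkcg_gq *blkg)
	{
		/* blkcg core has already allocated and linked blkg->pd[plid] */
		struct foo_grp *fg = container_of(blkg->pd[foo_policy.plid],
						  struct foo_grp, pd);

		fg->nr_foos = 0;
	}

	static struct blkcg_policy foo_policy = {
		.pd_size	= sizeof(struct foo_grp),
		.pd_init_fn	= foo_pd_init,
	};

	static int __init foo_init(void)
	{
		/* once, at subsystem/module init; assigns foo_policy.plid */
		return blkcg_policy_register(&foo_policy);
	}

	static void __exit foo_exit(void)
	{
		blkcg_policy_unregister(&foo_policy);
	}

	/*
	 * Per request_queue, from the policy's queue setup/teardown paths:
	 *
	 *	ret = blkcg_activate_policy(q, &foo_policy);
	 *	...
	 *	blkcg_deactivate_policy(q, &foo_policy);
	 */

With this split, registration is global and cheap, while pd allocation and pd_init_fn/pd_exit_fn calls happen only on queues where the policy is actually activated, which is the point of making activation per-queue.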