@@ -508,3 +508,106 @@ u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 		       int off)
 {
 	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
+
+	return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
+
+/**
+ * blkg_conf_prep - parse and prepare for per-blkg config update
+ * @blkcg: target block cgroup
+ * @pol: target policy
+ * @input: input string
+ * @ctx: blkg_conf_ctx to be filled
+ *
+ * Parse per-blkg config update from @input and initialize @ctx with the
+ * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
+ * value.  This function returns with RCU read lock and queue lock held and
+ * must be paired with blkg_conf_finish().
+ */
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+		   const char *input, struct blkg_conf_ctx *ctx)
+	__acquires(rcu) __acquires(disk->queue->queue_lock)
+{
+	struct gendisk *disk;
+	struct blkcg_gq *blkg;
+	unsigned int major, minor;
+	unsigned long long v;
+	int part, ret;
+
+	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
+		return -EINVAL;
+
+	disk = get_gendisk(MKDEV(major, minor), &part);
+	if (!disk || part)
+		return -EINVAL;
+
+	rcu_read_lock();
+	spin_lock_irq(disk->queue->queue_lock);
+
+	if (blkcg_policy_enabled(disk->queue, pol))
+		blkg = blkg_lookup_create(blkcg, disk->queue);
+	else
+		blkg = ERR_PTR(-EINVAL);
+
+	if (IS_ERR(blkg)) {
+		ret = PTR_ERR(blkg);
+		rcu_read_unlock();
+		spin_unlock_irq(disk->queue->queue_lock);
+		put_disk(disk);
+		/*
+		 * If queue was bypassing, we should retry.  Do so after a
+		 * short msleep().  It isn't strictly necessary but queue
+		 * can be bypassing for some time and it's always nice to
+		 * avoid busy looping.
+		 */
+		if (ret == -EBUSY) {
+			msleep(10);
+			ret = restart_syscall();
+		}
+		return ret;
+	}
+
+	ctx->disk = disk;
+	ctx->blkg = blkg;
+	ctx->v = v;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(blkg_conf_prep);
+
+/**
+ * blkg_conf_finish - finish up per-blkg config update
+ * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
+ *
+ * Finish up after per-blkg config update.  This function must be paired
+ * with blkg_conf_prep().
+ */
+void blkg_conf_finish(struct blkg_conf_ctx *ctx)
+	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
+{
+	spin_unlock_irq(ctx->disk->queue->queue_lock);
+	rcu_read_unlock();
+	put_disk(ctx->disk);
+}
+EXPORT_SYMBOL_GPL(blkg_conf_finish);
+
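The intended consumer of this pair is a policy's cgroupfs write handler: parse the "MAJ:MIN VAL" string, update the per-(cgroup, queue) policy data while the locks are held, then unwind. A minimal sketch, assuming a hypothetical policy "foo" with a blkcg_policy_foo descriptor, a blkg_to_foo() accessor, and a weight field (none of which are part of this patch); the handler signature follows the write_string-style cftype callbacks of this kernel vintage:

static int foo_set_weight(struct cgroup *cgrp, struct cftype *cft,
			  const char *buf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	int ret;

	/* returns with rcu_read_lock() and the queue lock held */
	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg is safe to dereference until blkg_conf_finish() */
	blkg_to_foo(ctx.blkg)->weight = ctx.v;

	blkg_conf_finish(&ctx);	/* drops queue lock, RCU, disk ref */
	return 0;
}

If the queue was bypassing, the -EBUSY path in blkg_conf_prep() transparently restarts the syscall, so handlers written this way need no retry logic of their own.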
+struct cftype blkcg_files[] = {
+	{
+		.name = "reset_stats",
+		.write_u64 = blkcg_reset_stats,
+	},
+	{ }	/* terminate */
+};
+
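Nothing in this hunk wires blkcg_files up; the sentinel-terminated array is consumed by the blkio cgroup subsystem declaration elsewhere in the file. Roughly, for kernels of this vintage (abridged sketch, not part of this hunk):

struct cgroup_subsys blkio_subsys = {
	.name		= "blkio",
	.css_offline	= blkcg_css_offline,	/* defined below */
	.subsys_id	= blkio_subsys_id,
	.base_cftypes	= blkcg_files,		/* the array above */
};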
+/**
+ * blkcg_css_offline - cgroup css_offline callback
+ * @cgroup: cgroup of interest
+ *
+ * This function is called when @cgroup is about to go away and is
+ * responsible for shooting down all blkgs associated with @cgroup.
+ * blkgs should be removed while holding both q and blkcg locks.  As
+ * blkcg lock is nested inside q lock, this function performs reverse
+ * double lock dancing.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
+ */
+static void blkcg_css_offline(struct cgroup *cgroup)
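The function body continues beyond this hunk. To make the "reverse double lock dancing" above concrete: because blkcg->lock nests inside the queue lock but is taken first here, each blkg's queue lock may only be trylocked, and on failure the blkcg lock is dropped and retaken so whoever holds the queue lock can make progress. A sketch of that loop (the shape of the pattern, not the verbatim body):

	spin_lock_irq(&blkcg->lock);
	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq,
						    blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			/* got both locks despite the inverted order */
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			/* contended: back off, let the q lock holder run */
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}
	spin_unlock_irq(&blkcg->lock);

The cpu_relax() in the back-off branch keeps the retry from hammering the cache line while the other CPU finishes with the queue lock.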