@@ -1365,3 +1365,63 @@ static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
 		blkg = blkg_lookup_create(blkcg, q);
 		if (!IS_ERR(blkg))
 			cfqg = blkg_to_cfqg(blkg);
+	}
+
+	return cfqg;
+}
+
+static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
+{
+	/* Currently, all async queues are mapped to root group */
+	if (!cfq_cfqq_sync(cfqq))
+		cfqg = cfqq->cfqd->root_group;
+
+	cfqq->cfqg = cfqg;
+	/* cfqq reference on cfqg */
+	cfqg_get(cfqg);
+}
+
+static u64 cfqg_prfill_weight_device(struct seq_file *sf,
+				     struct blkg_policy_data *pd, int off)
+{
+	struct cfq_group *cfqg = pd_to_cfqg(pd);
+
+	if (!cfqg->dev_weight)
+		return 0;
+	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
+}
+
+static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
+				    struct seq_file *sf)
+{
+	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
+			  cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
+			  false);
+	return 0;
+}
+
+static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+			    struct seq_file *sf)
+{
+	seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+	return 0;
+}
+
+static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+				  const char *buf)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkg_conf_ctx ctx;
+	struct cfq_group *cfqg;
+	int ret;
+
+	ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+	cfqg = blkg_to_cfqg(ctx.blkg);
+	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
+		cfqg->dev_weight = ctx.v;
+		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
+		ret = 0;