@@ -1425,3 +1425,186 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
 		cfqg->dev_weight = ctx.v;
 		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
 		ret = 0;
+	}
+
+	blkg_conf_finish(&ctx);
+	return ret;
+}
+
+static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg_gq *blkg;
+	struct hlist_node *n;
+
+	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
+		return -EINVAL;
+
+	spin_lock_irq(&blkcg->lock);
+	blkcg->cfq_weight = (unsigned int)val;
+
+	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+		if (cfqg && !cfqg->dev_weight)
+			cfqg->new_weight = blkcg->cfq_weight;
+	}
+
+	spin_unlock_irq(&blkcg->lock);
+	return 0;
+}
+
+static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+			   struct seq_file *sf)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
+			  cft->private, false);
+	return 0;
+}
+
+static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
+			     struct seq_file *sf)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
+			  cft->private, true);
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
+				      struct blkg_policy_data *pd, int off)
+{
+	struct cfq_group *cfqg = pd_to_cfqg(pd);
+	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
+	u64 v = 0;
+
+	if (samples) {
+		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
+		do_div(v, samples);
+	}
+	__blkg_prfill_u64(sf, pd, v);
+	return 0;
+}
+
+/* print avg_queue_size */
+static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
+				     struct seq_file *sf)
+{
+	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
+			  &blkcg_policy_cfq, 0, false);
+	return 0;
+}
+#endif	/* CONFIG_DEBUG_BLK_CGROUP */
+
+static struct cftype cfq_blkcg_files[] = {
+	{
+		.name = "weight_device",
+		.read_seq_string = cfqg_print_weight_device,
+		.write_string = cfqg_set_weight_device,
+		.max_write_len = 256,
+	},
+	{
+		.name = "weight",
+		.read_seq_string = cfq_print_weight,
+		.write_u64 = cfq_set_weight,
+	},
+	{
+		.name = "time",
+		.private = offsetof(struct cfq_group, stats.time),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "sectors",
+		.private = offsetof(struct cfq_group, stats.sectors),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "io_service_bytes",
+		.private = offsetof(struct cfq_group, stats.service_bytes),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+	{
+		.name = "io_serviced",
+		.private = offsetof(struct cfq_group, stats.serviced),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+	{
+		.name = "io_service_time",
+		.private = offsetof(struct cfq_group, stats.service_time),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+	{
+		.name = "io_wait_time",
+		.private = offsetof(struct cfq_group, stats.wait_time),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+	{
+		.name = "io_merged",
+		.private = offsetof(struct cfq_group, stats.merged),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+	{
+		.name = "io_queued",
+		.private = offsetof(struct cfq_group, stats.queued),
+		.read_seq_string = cfqg_print_rwstat,
+	},
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	{
+		.name = "avg_queue_size",
+		.read_seq_string = cfqg_print_avg_queue_size,
+	},
+	{
+		.name = "group_wait_time",
+		.private = offsetof(struct cfq_group, stats.group_wait_time),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "idle_time",
+		.private = offsetof(struct cfq_group, stats.idle_time),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "empty_time",
+		.private = offsetof(struct cfq_group, stats.empty_time),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "dequeue",
+		.private = offsetof(struct cfq_group, stats.dequeue),
+		.read_seq_string = cfqg_print_stat,
+	},
+	{
+		.name = "unaccounted_time",
+		.private = offsetof(struct cfq_group, stats.unaccounted_time),
+		.read_seq_string = cfqg_print_stat,
+	},
+#endif	/* CONFIG_DEBUG_BLK_CGROUP */
+	{ }	/* terminate */
+};
+#else /* GROUP_IOSCHED */
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+						struct blkcg *blkcg)
+{
+	return cfqd->root_group;
+}
+
+static inline void
+cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
+	cfqq->cfqg = cfqg;
+}
+
+#endif /* GROUP_IOSCHED */
+
+/*
+ * The cfqd->service_trees holds all pending cfq_queue's that have
+ * requests waiting to be processed. It is sorted in the order that
+ * we will service the queues.
+ */
+static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+				 bool add_front)