@@ -1239,3 +1239,129 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	if (cfqg->nr_cfqq)
 		return;
 
+	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
+	cfq_group_service_tree_del(st, cfqg);
+	cfqg->saved_workload_slice = 0;
+	cfqg_stats_update_dequeue(cfqg);
+}
+
+static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
+						unsigned int *unaccounted_time)
+{
+	unsigned int slice_used;
+
+	/*
+	 * Queue got expired before even a single request completed or
+	 * got expired immediately after first request completion.
+	 */
+	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
+		/*
+		 * Also charge the seek time incurred to the group, otherwise
+		 * if there are multiple queues in the group, each can dispatch
+		 * a single request on seeky media and cause lots of seek time
+		 * and the group will never know it.
+		 */
+		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
+					1);
+	} else {
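+		/*
+		 * Charge at most the allocated slice; time used beyond it,
+		 * plus any dispatch time before the slice actually started,
+		 * is reported back separately via *unaccounted_time.
+		 */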
+		slice_used = jiffies - cfqq->slice_start;
+		if (slice_used > cfqq->allocated_slice) {
+			*unaccounted_time = slice_used - cfqq->allocated_slice;
+			slice_used = cfqq->allocated_slice;
+		}
+		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
+			*unaccounted_time += cfqq->slice_start -
+					cfqq->dispatch_start;
+	}
+
+	return slice_used;
+}
+
+static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
+				struct cfq_queue *cfqq)
+{
+	struct cfq_rb_root *st = &cfqd->grp_service_tree;
+	unsigned int used_sl, charge, unaccounted_sl = 0;
+	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
+			- cfqg->service_tree_idle.count;
+
+	BUG_ON(nr_sync < 0);
+	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
+
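+	/*
+	 * By default a queue is charged the wall-clock time it used.  In
+	 * iops_mode() charge the number of requests dispatched instead, so
+	 * vdisktime advances in request units.  An async queue in a group
+	 * with no busy sync queues is charged its full allocated slice.
+	 */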
+	if (iops_mode(cfqd))
+		charge = cfqq->slice_dispatch;
+	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+		charge = cfqq->allocated_slice;
+
+	/* Can't update vdisktime while group is on service tree */
+	cfq_group_service_tree_del(st, cfqg);
+	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
+	/* If a new weight was requested, update now, off tree */
+	cfq_group_service_tree_add(st, cfqg);
+
+	/* This group is being expired. Save the context */
+	if (time_after(cfqd->workload_expires, jiffies)) {
+		cfqg->saved_workload_slice = cfqd->workload_expires
+						- jiffies;
+		cfqg->saved_workload = cfqd->serving_type;
+		cfqg->saved_serving_prio = cfqd->serving_prio;
+	} else
+		cfqg->saved_workload_slice = 0;
+
+	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
+			st->min_vdisktime);
+	cfq_log_cfqq(cfqq->cfqd, cfqq,
+		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
+		     used_sl, cfqq->slice_dispatch, charge,
+		     iops_mode(cfqd), cfqq->nr_sectors);
+	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
+	cfqg_stats_set_start_empty_time(cfqg);
+}
+
+/**
+ * cfq_init_cfqg_base - initialize base part of a cfq_group
+ * @cfqg: cfq_group to initialize
+ *
+ * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
+ * is enabled or not.
+ */
+static void cfq_init_cfqg_base(struct cfq_group *cfqg)
+{
+	struct cfq_rb_root *st;
+	int i, j;
+
+	for_each_cfqg_st(cfqg, i, j, st)
+		*st = CFQ_RB_ROOT;
+	RB_CLEAR_NODE(&cfqg->rb_node);
+
+	cfqg->ttime.last_end_request = jiffies;
+}
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
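+/*
+ * cfq_pd_init() runs when a new blkcg_gq is created for this policy:
+ * set up the cfq_group carried in the blkg's per-policy data and
+ * inherit the weight configured on the owning blkcg.
+ */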
+static void cfq_pd_init(struct blkcg_gq *blkg)
+{
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+	cfq_init_cfqg_base(cfqg);
+	cfqg->weight = blkg->blkcg->cfq_weight;
+}
+
+/*
+ * Search for the cfq group the current task belongs to. request_queue lock
+ * must be held.
+ */
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+						struct blkcg *blkcg)
+{
+	struct request_queue *q = cfqd->queue;
+	struct cfq_group *cfqg = NULL;
+
+	/* avoid lookup for the common case where there's no blkcg */
+	if (blkcg == &blkcg_root) {
+		cfqg = cfqd->root_group;
+	} else {
+		struct blkcg_gq *blkg;
+
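+		/*
+		 * Find the existing blkg for this (blkcg, q) pair or try
+		 * to create one; on failure cfqg is left NULL for the
+		 * caller to handle.
+		 */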
+		blkg = blkg_lookup_create(blkcg, q);
+		if (!IS_ERR(blkg))
+			cfqg = blkg_to_cfqg(blkg);