@@ -2305,3 +2305,88 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 	/*
 	 * Otherwise, we do only if they are the last ones
+	 * in their service tree.
+	 */
+	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
+	   !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
+		return true;
+	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
+			service_tree->count);
+	return false;
+}
+
+static void cfq_arm_slice_timer(struct cfq_data *cfqd)
+{
+	struct cfq_queue *cfqq = cfqd->active_queue;
+	struct cfq_io_cq *cic;
+	unsigned long sl, group_idle = 0;
+
+	/*
+	 * SSD device without seek penalty, disable idling. But only do so
+	 * for devices that support queuing, otherwise we still have a problem
+	 * with sync vs async workloads.
+	 */
+	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+		return;
+
+	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
+	WARN_ON(cfq_cfqq_slice_new(cfqq));
+
+	/*
+	 * idle is disabled, either manually or by past process history
+	 */
+	if (!cfq_should_idle(cfqd, cfqq)) {
+		/* no queue idling. Check for group idling */
+		if (cfqd->cfq_group_idle)
+			group_idle = cfqd->cfq_group_idle;
+		else
+			return;
+	}
+
+	/*
+	 * still active requests from this queue, don't idle
+	 */
+	if (cfqq->dispatched)
+		return;
+
+	/*
+	 * task has exited, don't wait
+	 */
+	cic = cfqd->active_cic;
+	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
+		return;
+
+	/*
+	 * If our average think time is larger than the remaining time
+	 * slice, then don't idle. This avoids overrunning the allotted
+	 * time slice.
+	 */
+	if (sample_valid(cic->ttime.ttime_samples) &&
+	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
+		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
+			     cic->ttime.ttime_mean);
+		return;
+	}
+
+	/* There are other queues in the group, don't do group idle */
+	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+		return;
+
+	cfq_mark_cfqq_wait_request(cfqq);
+
+	if (group_idle)
+		sl = cfqd->cfq_group_idle;
+	else
+		sl = cfqd->cfq_slice_idle;
+
+	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+	cfqg_stats_set_start_idle_time(cfqq->cfqg);
+	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+			group_idle ? 1 : 0);
+}
+
+/*
+ * Move request from internal lists to the request queue dispatch list.
+ */
+static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
+{