@@ -3265,3 +3265,114 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+	/*
+	 * pin the queue now that it's allocated, scheduler exit will prune it
+	 */
+	if (!is_sync && !(*async_cfqq)) {
+		cfqq->ref++;
+		*async_cfqq = cfqq;
+	}
+
+	cfqq->ref++;
+	return cfqq;
+}
+
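For readers new to the refcounting here, a tiny userspace sketch of the pinning pattern (illustrative only, not part of the patch; get_queue and async_cache are made-up names): the shared async pointer cache takes its own reference on the queue it caches, so the queue outlives individual callers, and that reference is only dropped when the scheduler exits, per the comment above.

#include <stdio.h>

struct queue {
	int ref;
};

static struct queue *async_cache;	/* stand-in for the per-prio async pointer */

static struct queue *get_queue(struct queue *q, int is_sync)
{
	if (!is_sync && !async_cache) {
		q->ref++;		/* reference held by the cache */
		async_cache = q;
	}
	q->ref++;			/* reference handed to the caller */
	return q;
}

int main(void)
{
	struct queue q = { .ref = 0 };

	get_queue(&q, 0);
	get_queue(&q, 0);
	/* cache holds 1 ref, each caller holds 1 ref */
	printf("refs=%d (1 cache + 2 callers)\n", q.ref);
	return 0;
}
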
+static void
+__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
+{
+	unsigned long elapsed = jiffies - ttime->last_end_request;
+	elapsed = min(elapsed, 2UL * slice_idle);
+
+	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
+	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
+	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
+}
+
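As an aside, the arithmetic above is a fixed-point exponentially weighted moving average: both ttime_samples and ttime_total decay by 7/8 on each update and are scaled by 256, and the +128 gives round-to-nearest once ttime_samples has settled at its fixed point of 256. A minimal userspace sketch (illustrative only, not part of the patch; the struct and function names are made up and the initial values are assumed zero):

#include <stdio.h>

struct ttime_sketch {
	unsigned long samples;	/* decayed sample count, scaled by 256 */
	unsigned long total;	/* decayed total think time, scaled by 256 */
	unsigned long mean;	/* rounded mean think time, in jiffies */
};

static void ttime_update(struct ttime_sketch *t, unsigned long elapsed)
{
	t->samples = (7 * t->samples + 256) / 8;
	t->total   = (7 * t->total + 256 * elapsed) / 8;
	t->mean    = (t->total + 128) / t->samples;
}

int main(void)
{
	struct ttime_sketch t = { 0, 0, 0 };
	unsigned long think[] = { 4, 4, 4, 40 };	/* made-up jiffies values */

	for (int i = 0; i < 4; i++) {
		ttime_update(&t, think[i]);
		printf("sample %d: mean=%lu\n", i, t.mean);
	}
	return 0;
}
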
+static void
+cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+			struct cfq_io_cq *cic)
+{
+	if (cfq_cfqq_sync(cfqq)) {
+		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
+		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
+			cfqd->cfq_slice_idle);
+	}
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
+#endif
+}
+
+static void
+cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+		       struct request *rq)
+{
+	sector_t sdist = 0;
+	sector_t n_sec = blk_rq_sectors(rq);
+	if (cfqq->last_request_pos) {
+		if (cfqq->last_request_pos < blk_rq_pos(rq))
+			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
+		else
+			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
+	}
+
+	cfqq->seek_history <<= 1;
+	if (blk_queue_nonrot(cfqd->queue))
+		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
+	else
+		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
+}
+
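The seek_history field works as a shift register: each request shifts in one bit recording whether that request looked seeky (large seek distance on rotational disks, small request size on non-rotational ones), and CFQQ_SEEKY(), defined elsewhere in cfq-iosched.c, judges the queue from how many recent bits are set. A standalone sketch of the idea (illustrative only, not part of the patch; the threshold constants below are assumptions, not the kernel's values, and only the rotational-distance case is modeled):

#include <stdint.h>
#include <stdio.h>

#define SEEK_THR_SECTORS	1024	/* assumed seek-distance threshold */

static uint32_t seek_history;

static void record_request(uint64_t last_pos, uint64_t pos)
{
	uint64_t sdist = pos > last_pos ? pos - last_pos : last_pos - pos;

	seek_history <<= 1;
	seek_history |= (sdist > SEEK_THR_SECTORS);
}

static int queue_is_seeky(void)
{
	/* e.g. more than 1/8 of the last 32 recorded requests were seeky */
	return __builtin_popcount(seek_history) > 32 / 8;
}

int main(void)
{
	uint64_t pos = 0;

	/* alternate nearly-sequential and far-away requests */
	for (int i = 0; i < 32; i++) {
		uint64_t next = (i & 1) ? pos + 8 : pos + 1000000;
		record_request(pos, next);
		pos = next;
	}
	printf("seeky=%d\n", queue_is_seeky());
	return 0;
}
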
+/*
+ * Disable idle window if the process thinks too long or seeks so much that
+ * it doesn't matter
+ */
+static void
+cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+		       struct cfq_io_cq *cic)
+{
+	int old_idle, enable_idle;
+
+	/*
+	 * Don't idle for async or idle io prio class
+	 */
+	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
+		return;
+
+	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
+
+	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
+		cfq_mark_cfqq_deep(cfqq);
+
+	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
+		enable_idle = 0;
+	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
+		 !cfqd->cfq_slice_idle ||
+		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
+		enable_idle = 0;
+	else if (sample_valid(cic->ttime.ttime_samples)) {
+		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
+			enable_idle = 0;
+		else
+			enable_idle = 1;
+	}
+
+	if (old_idle != enable_idle) {
+		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
+		if (enable_idle)
+			cfq_mark_cfqq_idle_window(cfqq);
+		else
+			cfq_clear_cfqq_idle_window(cfqq);
+	}
+}
+
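For clarity, the idling decision above can be read as a small pure function: never idle on a REQ_NOIDLE hint; never idle when the io_context has no active users, slice_idle is zero, or the queue is seeky without being deep; otherwise follow the measured think time once enough samples exist, and keep the previous setting until then. A simplified restatement (illustrative only, not part of the patch; the struct and field names here are invented):

#include <stdbool.h>
#include <stdio.h>

struct idle_inputs {
	bool noidle_hint;		/* next request carries REQ_NOIDLE */
	bool ioc_active;		/* io_context still has active users */
	bool deep;			/* queue has built a deep backlog */
	bool seeky;			/* CFQQ_SEEKY() said yes */
	bool samples_valid;		/* enough think-time samples collected */
	unsigned long ttime_mean;	/* mean think time, jiffies */
	unsigned long slice_idle;	/* configured idle window, jiffies */
};

static bool should_idle(const struct idle_inputs *in, bool old_idle)
{
	if (in->noidle_hint)
		return false;
	if (!in->ioc_active || !in->slice_idle || (!in->deep && in->seeky))
		return false;
	if (in->samples_valid)
		return in->ttime_mean <= in->slice_idle;
	return old_idle;	/* not enough data: keep the previous setting */
}

int main(void)
{
	struct idle_inputs in = {
		.ioc_active = true, .samples_valid = true,
		.ttime_mean = 12, .slice_idle = 8,
	};

	/* thinks longer than the idle window, so idling would be wasted */
	printf("idle=%d\n", should_idle(&in, true));
	return 0;
}
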
+/*
+ * Check if new_cfqq should preempt the currently active queue. Return 0 for
+ * no or if we aren't sure, a 1 will cause a preempt.
+ */
+static bool
+cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
+		   struct request *rq)
+{
+	struct cfq_queue *cfqq;
+
+	cfqq = cfqd->active_queue;
+	if (!cfqq)
+		return false;