@@ -3586,3 +3586,164 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	/* If there are other queues in the group, don't wait */
 	if (cfqq->cfqg->nr_cfqq > 1)
+		return false;
+
+	/* the only queue in the group, but think time is big */
+	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
+		return false;
+
+	if (cfq_slice_used(cfqq))
+		return true;
+
+	/* if slice left is less than think time, wait busy */
+	if (cic && sample_valid(cic->ttime.ttime_samples)
+	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
+		return true;
+
+	/*
+	 * If think time is less than a jiffy then ttime_mean=0 and the above
+	 * will not be true. It might happen that the slice has not expired yet
+	 * but will expire soon (4-5 ns) during select_queue(). To cover the
+	 * case where think time is less than a jiffy, mark the queue wait
+	 * busy if only 1 jiffy is left in the slice.
+	 */
+	if (cfqq->slice_end - jiffies == 1)
+		return true;
+
+	return false;
+}
+
+static void cfq_completed_request(struct request_queue *q, struct request *rq)
+{
+	struct cfq_queue *cfqq = RQ_CFQQ(rq);
+	struct cfq_data *cfqd = cfqq->cfqd;
+	const int sync = rq_is_sync(rq);
+	unsigned long now;
+
+	now = jiffies;
+	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
+		     !!(rq->cmd_flags & REQ_NOIDLE));
+
+	cfq_update_hw_tag(cfqd);
+
+	WARN_ON(!cfqd->rq_in_driver);
+	WARN_ON(!cfqq->dispatched);
+	cfqd->rq_in_driver--;
+	cfqq->dispatched--;
+	(RQ_CFQG(rq))->dispatched--;
+	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
+				     rq_io_start_time_ns(rq), rq->cmd_flags);
+
+	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
+
+	if (sync) {
+		struct cfq_rb_root *service_tree;
+
+		RQ_CIC(rq)->ttime.last_end_request = now;
+
+		if (cfq_cfqq_on_rr(cfqq))
+			service_tree = cfqq->service_tree;
+		else
+			service_tree = service_tree_for(cfqq->cfqg,
+				cfqq_prio(cfqq), cfqq_type(cfqq));
+		service_tree->ttime.last_end_request = now;
+		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+			cfqd->last_delayed_sync = now;
+	}
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	cfqq->cfqg->ttime.last_end_request = now;
+#endif
+
+	/*
+	 * If this is the active queue, check if it needs to be expired,
+	 * or if we want to idle in case it has no pending requests.
+	 */
+	if (cfqd->active_queue == cfqq) {
+		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
+
+		if (cfq_cfqq_slice_new(cfqq)) {
+			cfq_set_prio_slice(cfqd, cfqq);
+			cfq_clear_cfqq_slice_new(cfqq);
+		}
+
+		/*
+		 * Should we wait for the next request to come in before we
+		 * expire the queue?
+		 */
+		if (cfq_should_wait_busy(cfqd, cfqq)) {
+			unsigned long extend_sl = cfqd->cfq_slice_idle;
+			if (!cfqd->cfq_slice_idle)
+				extend_sl = cfqd->cfq_group_idle;
+			cfqq->slice_end = jiffies + extend_sl;
+			cfq_mark_cfqq_wait_busy(cfqq);
+			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
+		}
+
+		/*
+		 * Idling is not enabled on:
+		 * - expired queues
+		 * - idle-priority queues
+		 * - async queues
+		 * - queues with still some requests queued
+		 * - when there is a close cooperator
+		 */
+		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
+			cfq_slice_expired(cfqd, 1);
+		else if (sync && cfqq_empty &&
+			 !cfq_close_cooperator(cfqd, cfqq)) {
+			cfq_arm_slice_timer(cfqd);
+		}
+	}
+
+	if (!cfqd->rq_in_driver)
+		cfq_schedule_dispatch(cfqd);
+}
+
+static inline int __cfq_may_queue(struct cfq_queue *cfqq)
+{
+	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
+		cfq_mark_cfqq_must_alloc_slice(cfqq);
+		return ELV_MQUEUE_MUST;
+	}
+
+	return ELV_MQUEUE_MAY;
+}
+
+static int cfq_may_queue(struct request_queue *q, int rw)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct task_struct *tsk = current;
+	struct cfq_io_cq *cic;
+	struct cfq_queue *cfqq;
+
+	/*
+	 * don't force setup of a queue from here, as a call to may_queue
+	 * does not necessarily imply that a request actually will be queued.
+	 * so just lookup a possibly existing queue, or return 'may queue'
+	 * if that fails
+	 */
+	cic = cfq_cic_lookup(cfqd, tsk->io_context);
+	if (!cic)
+		return ELV_MQUEUE_MAY;
+
+	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
+	if (cfqq) {
+		cfq_init_prio_data(cfqq, cic);
+
+		return __cfq_may_queue(cfqq);
+	}
+
+	return ELV_MQUEUE_MAY;
+}
+
+/*
+ * queue lock held here
+ */
+static void cfq_put_request(struct request *rq)
+{
+	struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+	if (cfqq) {
+		const int rw = rq_data_dir(rq);
+