@@ -2202,3 +2202,119 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 	/*
 	 * If the exact sector wasn't found, the parent of the NULL leaf
 	 * will contain the closest sector.
+	 */
+	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+		return __cfqq;
+
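+	/*
+	 * The nearest leaf was not close enough; probe the in-order
+	 * neighbour on whichever side of the target sector it lies.
+	 */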
+	if (blk_rq_pos(__cfqq->next_rq) < sector)
+		node = rb_next(&__cfqq->p_node);
+	else
+		node = rb_prev(&__cfqq->p_node);
+	if (!node)
+		return NULL;
+
+	__cfqq = rb_entry(node, struct cfq_queue, p_node);
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+		return __cfqq;
+
+	return NULL;
+}
+
+/*
+ * cfqd - obvious
+ * cur_cfqq - passed in so that we don't decide that the current queue is
+ *            closely cooperating with itself.
+ *
+ * So, basically we're assuming that cur_cfqq has dispatched at least
+ * one request, and that cfqd->last_position reflects a position on the disk
+ * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
+ * assumption.
+ */
+static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
+					      struct cfq_queue *cur_cfqq)
+{
+	struct cfq_queue *cfqq;
+
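+	/*
+	 * Idle-class or async queues are never idled for, and seeky
+	 * queues offer no locality worth exploiting.
+	 */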
+	if (cfq_class_idle(cur_cfqq))
+		return NULL;
+	if (!cfq_cfqq_sync(cur_cfqq))
+		return NULL;
+	if (CFQQ_SEEKY(cur_cfqq))
+		return NULL;
+
+	/*
+	 * Don't search the priority tree if it's the only queue in the group.
+	 */
+	if (cur_cfqq->cfqg->nr_cfqq == 1)
+		return NULL;
+
+	/*
+	 * We should notice if some of the queues are cooperating, e.g.
+	 * working closely on the same area of the disk. In that case,
+	 * we can group them together and not waste time idling.
+	 */
+	cfqq = cfqq_close(cfqd, cur_cfqq);
+	if (!cfqq)
+		return NULL;
+
+	/* If the new queue belongs to a different cfq_group, don't choose it. */
+	if (cur_cfqq->cfqg != cfqq->cfqg)
+		return NULL;
+
+	/*
+	 * It only makes sense to merge sync queues.
+	 */
+	if (!cfq_cfqq_sync(cfqq))
+		return NULL;
+	if (CFQQ_SEEKY(cfqq))
+		return NULL;
+
+	/*
+	 * Do not merge queues of different priority classes.
+	 */
+	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
+		return NULL;
+
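+	/* All checks passed: cfqq is a close, mergeable cooperator. */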
+	return cfqq;
+}
+
+/*
+ * Determine whether we should enforce the idle window for this queue.
+ */
+static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	enum wl_prio_t prio = cfqq_prio(cfqq);
+	struct cfq_rb_root *service_tree = cfqq->service_tree;
+
+	BUG_ON(!service_tree);
+	BUG_ON(!service_tree->count);
+
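+	/* Idling is disabled altogether when slice_idle is 0. */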
+	if (!cfqd->cfq_slice_idle)
+		return false;
+
+	/* We never idle for idle-class queues. */
+	if (prio == IDLE_WORKLOAD)
+		return false;
+
+	/*
+	 * We do for queues that were marked with the idle window flag,
+	 * unless the device is non-rotational and does its own queueing,
+	 * since idling buys nothing there.
+	 */
+	if (cfq_cfqq_idle_window(cfqq) &&
+	    !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
+		return true;
+
+	/*
+	 * Otherwise, we do only if they are the last ones