@@ -2452,3 +2452,106 @@ static int cfqq_process_refs(struct cfq_queue *cfqq)
 	return process_refs;
 }
 
+static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
+{
+	int process_refs, new_process_refs;
+	struct cfq_queue *__cfqq;
+
+	/*
+	 * If there are no process references on the new_cfqq, then it is
+	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
+	 * chain may have dropped their last reference (not just their
+	 * last process reference).
+	 */
+	if (!cfqq_process_refs(new_cfqq))
+		return;
+
+	/* Avoid a circular list and skip interim queue merges */
+	while ((__cfqq = new_cfqq->new_cfqq)) {
+		if (__cfqq == cfqq)
+			return;
+		new_cfqq = __cfqq;
+	}
+
+	process_refs = cfqq_process_refs(cfqq);
+	new_process_refs = cfqq_process_refs(new_cfqq);
+	/*
+	 * If the process for the cfqq has gone away, there is no
+	 * sense in merging the queues.
+	 */
+	if (process_refs == 0 || new_process_refs == 0)
+		return;
+
+	/*
+	 * Merge in the direction of the lesser amount of work.
+	 */
+	if (new_process_refs >= process_refs) {
+		cfqq->new_cfqq = new_cfqq;
+		new_cfqq->ref += process_refs;
+	} else {
+		new_cfqq->new_cfqq = cfqq;
+		cfqq->ref += new_process_refs;
+	}
+}
+
+static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
+				struct cfq_group *cfqg, enum wl_prio_t prio)
+{
+	struct cfq_queue *queue;
+	int i;
+	bool key_valid = false;
+	unsigned long lowest_key = 0;
+	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
+
+	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
+		/* select the one with lowest rb_key */
+		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
+		if (queue &&
+		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
+			lowest_key = queue->rb_key;
+			cur_best = i;
+			key_valid = true;
+		}
+	}
+
+	return cur_best;
+}
+
+static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+	unsigned slice;
+	unsigned count;
+	struct cfq_rb_root *st;
+	unsigned group_slice;
+	enum wl_prio_t original_prio = cfqd->serving_prio;
+
+	/* Choose next priority. RT > BE > IDLE */
+	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
+		cfqd->serving_prio = RT_WORKLOAD;
+	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
+		cfqd->serving_prio = BE_WORKLOAD;
+	else {
+		cfqd->serving_prio = IDLE_WORKLOAD;
+		cfqd->workload_expires = jiffies + 1;
+		return;
+	}
+
+	if (original_prio != cfqd->serving_prio)
+		goto new_workload;
+
+	/*
+	 * For RT and BE, we have to choose also the type
+	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
+	 * expiration time
+	 */
+	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+	count = st->count;
+
+	/*
+	 * check workload expiration, and that we still have other queues ready
+	 */
+	if (count && !time_after(jiffies, cfqd->workload_expires))
+		return;
+
+new_workload:
+	/* otherwise select new workload type */