@@ -856,3 +856,130 @@ static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
 	return d;
 }

+static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
+{
+	s64 delta = (s64)(vdisktime - min_vdisktime);
+	if (delta > 0)
+		min_vdisktime = vdisktime;
+
+	return min_vdisktime;
+}
+
+static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
+{
+	s64 delta = (s64)(vdisktime - min_vdisktime);
+	if (delta < 0)
+		min_vdisktime = vdisktime;
+
+	return min_vdisktime;
+}
+
+static void update_min_vdisktime(struct cfq_rb_root *st)
+{
+	struct cfq_group *cfqg;
+
+	if (st->left) {
+		cfqg = rb_entry_cfqg(st->left);
+		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
+						  cfqg->vdisktime);
+	}
+}
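/*
 * Worked example (illustrative, not part of the patch): the (s64) cast
 * keeps the comparison correct across u64 wraparound.  If min_vdisktime
 * is ULLONG_MAX and vdisktime has wrapped around to 5, the unsigned
 * difference 5 - ULLONG_MAX is 6, which is positive as an s64, so
 * max_vdisktime() treats the wrapped value 5 as the later of the two.
 */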
+
+/*
+ * Get the averaged number of queues of RT/BE priority.
+ * The average is updated with a formula that gives more weight to higher
+ * numbers, so it follows sudden increases quickly and decays slowly.
+ */
+
+static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
+						struct cfq_group *cfqg, bool rt)
+{
+	unsigned min_q, max_q;
+	unsigned mult = cfq_hist_divisor - 1;
+	unsigned round = cfq_hist_divisor / 2;
+	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
+
+	min_q = min(cfqg->busy_queues_avg[rt], busy);
+	max_q = max(cfqg->busy_queues_avg[rt], busy);
+	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
+				    cfq_hist_divisor;
+	return cfqg->busy_queues_avg[rt];
+}
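/*
 * Worked example (illustrative, assuming cfq_hist_divisor is 4, so
 * mult == 3 and round == 2): if the stored average is 1 and 5 queues
 * suddenly become busy, the new average is (3 * 5 + 1 + 2) / 4 = 4 after a
 * single update; once the burst ends (busy == 0) it only drifts down one
 * step per update, (3 * 4 + 0 + 2) / 4 = 3, then 2.
 */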
+
+static inline unsigned
+cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+	struct cfq_rb_root *st = &cfqd->grp_service_tree;
+
+	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
+}
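/*
 * Worked example (illustrative, assuming the usual 300 ms target latency):
 * a group holding a weight of 500 out of a total weight of 1500 on the
 * service tree is entitled to 300 * 500 / 1500 = 100 ms of the latency
 * window.
 */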
+
+static inline unsigned
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
+	if (cfqd->cfq_latency) {
+		/*
+		 * interested queues (we consider only the ones with the same
+		 * priority class in the cfq group)
+		 */
+		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
+						cfq_class_rt(cfqq));
+		unsigned sync_slice = cfqd->cfq_slice[1];
+		unsigned expect_latency = sync_slice * iq;
+		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
+
+		if (expect_latency > group_slice) {
+			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
+			/* scale low_slice according to IO priority
+			 * and sync vs async */
+			unsigned low_slice =
+				min(slice, base_low_slice * slice / sync_slice);
+			/* the adapted slice value is scaled to fit all iqs
+			 * into the target latency */
+			slice = max(slice * group_slice / expect_latency,
+				    low_slice);
+		}
+	}
+	return slice;
+}
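/*
 * Worked example (illustrative, assuming the usual defaults of a 100 ms
 * sync slice and an 8 ms cfq_slice_idle): with iq == 4 the expected
 * latency is 400 ms; if the group slice is only 200 ms, a 100 ms queue
 * slice is scaled down to 100 * 200 / 400 = 50 ms, which still sits above
 * the low_slice floor of min(100, 16 * 100 / 100) = 16 ms.
 */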
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
+
+	cfqq->slice_start = jiffies;
+	cfqq->slice_end = jiffies + slice;
+	cfqq->allocated_slice = slice;
+	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
+}
+
+/*
+ * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
+ * isn't valid until the first request from the dispatch is activated
+ * and the slice time set.
+ */
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
+{
+	if (cfq_cfqq_slice_new(cfqq))
+		return false;
+	if (time_before(jiffies, cfqq->slice_end))
+		return false;
+
+	return true;
+}
+
+/*
+ * Lifted from AS - choose which of rq1 and rq2 is best served now.
+ * We choose the request that is closest to the head right now. Distance
+ * behind the head is penalized and only allowed to a certain extent.
+ */
+static struct request *
+cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
+{
+	sector_t s1, s2, d1 = 0, d2 = 0;
+	unsigned long back_max;
+#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
+#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
+	unsigned wrap = 0; /* bit mask: requests behind the disk head? */