@@ -559,3 +559,93 @@ static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
 	}
 
 	/* Calc approx time to dispatch */
+	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
+
+	if (jiffy_wait > jiffy_elapsed)
+		jiffy_wait = jiffy_wait - jiffy_elapsed;
+	else
+		jiffy_wait = 1;
+
+	if (wait)
+		*wait = jiffy_wait;
+	return 0;
+}
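/*
 * A quick sanity check of the formula above, assuming HZ == 1000 and an
 * iops limit of 100: with tg->io_disp[rw] == 100 IOs already dispatched
 * in the current slice,
 *
 *	jiffy_wait = ((100 + 1) * 1000) / 100 + 1 = 1011 jiffies
 *
 * so after 1000 elapsed jiffies the bio waits about 1011 - 1000 = 11
 * more jiffies before the 101st IO fits the configured rate. The
 * trailing "+ 1" rounds up, so integer truncation never lets a group
 * dispatch early.
 */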
+
+static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
+		struct bio *bio, unsigned long *wait)
+{
+	bool rw = bio_data_dir(bio);
+	u64 bytes_allowed, extra_bytes, tmp;
+	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+
+	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
+
+	/* Slice has just started. Consider one slice interval */
+	if (!jiffy_elapsed)
+		jiffy_elapsed_rnd = throtl_slice;
+
+	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
+
+	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
+	do_div(tmp, HZ);
+	bytes_allowed = tmp;
+
+	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+		if (wait)
+			*wait = 0;
+		return 1;
+	}
+
+	/* Calc approx time to dispatch */
+	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
+
+	if (!jiffy_wait)
+		jiffy_wait = 1;
+
+	/*
+	 * This wait time is without taking into consideration the rounding
+	 * up we did. Add that time also.
+	 */
+	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
+	if (wait)
+		*wait = jiffy_wait;
+	return 0;
+}
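/*
 * A rough worked example of the bps path, assuming HZ == 1000, a
 * throtl_slice of 100 jiffies and tg->bps[rw] == 1048576 (1 MiB/s):
 * one jiffy into a fresh slice, jiffy_elapsed_rnd rounds up to 100, so
 *
 *	bytes_allowed = 1048576 * 100 / 1000 = 104857 (~102 KiB)
 *
 * and a 1 MiB bio with bytes_disp[rw] == 0 needs
 *
 *	extra_bytes = 1048576 - 104857 = 943719
 *	jiffy_wait  = 943719 * 1000 / 1048576 = 900 jiffies
 *
 * plus 100 - 1 = 99 jiffies of round-up slack: ~1 second in total,
 * matching the intuition that 1 MiB takes a second to earn at 1 MiB/s.
 */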
+
+static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
+{
+	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
+		return 1;
+	return 0;
+}
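/*
 * Note on the -1 sentinel: with bps[rw] declared as an unsigned 64-bit
 * value (as the u64 arithmetic in tg_with_in_bps_limit() suggests), the
 * comparison against -1 relies on the usual C conversions mapping -1 to
 * the type's maximum value, i.e. "unlimited". tg_may_dispatch() below
 * repeats the same check to pass such bios through with zero wait.
 */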
+
+/*
+ * Returns whether one can dispatch a bio or not. Also returns approx number
+ * of jiffies to wait before this bio is within the IO rate and can be dispatched
+ */
+static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
+		struct bio *bio, unsigned long *wait)
+{
+	bool rw = bio_data_dir(bio);
+	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
+
+	/*
+	 * Currently whole state machine of group depends on first bio
+	 * queued in the group bio list. So one should not be calling
+	 * this function with a different bio if there are other bios
+	 * queued.
+	 */
+	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
+
+	/* If tg->bps = -1, then BW is unlimited */
+	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
+		if (wait)
+			*wait = 0;
+		return 1;
+	}
+
+	/*
+	 * If previous slice expired, start a new one otherwise renew/extend
+	 * existing slice to make sure it is at least throtl_slice interval
+	 * long since now.
+	 */