@@ -649,3 +649,62 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 	 * existing slice to make sure it is at least throtl_slice interval
 	 * long since now.
 	 */
+	if (throtl_slice_used(td, tg, rw))
+		throtl_start_new_slice(td, tg, rw);
+	else {
+		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
+			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
+	}
+
+	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
+	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
+		if (wait)
+			*wait = 0;
+		return 1;
+	}
+
+	max_wait = max(bps_wait, iops_wait);
+
+	if (wait)
+		*wait = max_wait;
+
+	if (time_before(tg->slice_end[rw], jiffies + max_wait))
+		throtl_extend_slice(td, tg, rw, jiffies + max_wait);
+
+	return 0;
+}
+
+static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
+					 int rw)
+{
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+	struct tg_stats_cpu *stats_cpu;
+	unsigned long flags;
+
+	/* If per cpu stats are not allocated yet, don't do any accounting. */
+	if (tg->stats_cpu == NULL)
+		return;
+
+	/*
+	 * Disabling interrupts to provide mutual exclusion between two
+	 * writes on same cpu. It probably is not needed for 64bit. Not
+	 * optimizing that case yet.
+	 */
+	local_irq_save(flags);
+
+	stats_cpu = this_cpu_ptr(tg->stats_cpu);
+
+	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+	local_irq_restore(flags);
+}
+
+static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
+{
+	bool rw = bio_data_dir(bio);
+
+	/* Charge the bio to the group */
+	tg->bytes_disp[rw] += bio->bi_size;
+	tg->io_disp[rw]++;
+