@@ -639,3 +639,77 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 	struct cfqg_stats *stats = &cfqg->stats;
 	unsigned long long now = sched_clock();
 
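+	/*
+	 * service_time accumulates dispatch-to-completion latency,
+	 * wait_time accumulates queue-to-dispatch latency.  sched_clock()
+	 * is not synchronized across CPUs, so only account intervals
+	 * that don't appear to run backwards.
+	 */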
+	if (time_after64(now, io_start_time))
+		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+	if (time_after64(io_start_time, start_time))
+		blkg_rwstat_add(&stats->wait_time, rw,
+				io_start_time - start_time);
+}
+
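+/* per-policy hook that clears this group's stats (e.g. on blkio.reset_stats) */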
+static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+{
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+	struct cfqg_stats *stats = &cfqg->stats;
+
+	/*
+	 * queued stats shouldn't be cleared: requests still in flight
+	 * would underflow them when they are removed or completed
+	 */
+	blkg_rwstat_reset(&stats->service_bytes);
+	blkg_rwstat_reset(&stats->serviced);
+	blkg_rwstat_reset(&stats->merged);
+	blkg_rwstat_reset(&stats->service_time);
+	blkg_rwstat_reset(&stats->wait_time);
+	blkg_stat_reset(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	blkg_stat_reset(&stats->unaccounted_time);
+	blkg_stat_reset(&stats->avg_queue_size_sum);
+	blkg_stat_reset(&stats->avg_queue_size_samples);
+	blkg_stat_reset(&stats->dequeue);
+	blkg_stat_reset(&stats->group_wait_time);
+	blkg_stat_reset(&stats->idle_time);
+	blkg_stat_reset(&stats->empty_time);
+#endif
+}
+
+#else	/* CONFIG_CFQ_GROUP_IOSCHED */
+
+static inline void cfqg_get(struct cfq_group *cfqg) { }
+static inline void cfqg_put(struct cfq_group *cfqg) { }
+
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
+	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
+
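+/*
+ * No-op stat helpers for !CONFIG_CFQ_GROUP_IOSCHED builds; they let
+ * callers update group stats unconditionally without #ifdefs.
+ */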
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+			struct cfq_group *curr_cfqg, int rw) { }
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+			unsigned long time, unsigned long unaccounted_time) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+			uint64_t bytes, int rw) { }
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+			uint64_t start_time, uint64_t io_start_time, int rw) { }
+
+#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
+
+#define cfq_log(cfqd, fmt, args...)	\
+	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
+
+/*
+ * Traverses all cfq group service trees: each workload type (async,
+ * sync-noidle, sync) of each non-idle priority class, plus the single
+ * idle-class tree.
+ */
+#define for_each_cfqg_st(cfqg, i, j, st) \
+	for (i = 0; i <= IDLE_WORKLOAD; i++) \
+		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
+			: &cfqg->service_tree_idle; \
+			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
+			(i == IDLE_WORKLOAD && j == 0); \
+			j++, st = i < IDLE_WORKLOAD ? \
+			&cfqg->service_trees[i][j]: NULL) \
+
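+/*
+ * Think time is "big" when the mean inter-request time exceeds the
+ * idle window we would otherwise spend waiting for the next request.
+ */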
+static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
+	struct cfq_ttime *ttime, bool group_idle)
+{
+	unsigned long slice;
+	if (!sample_valid(ttime->ttime_samples))
+		return false;
+	if (group_idle)
+		slice = cfqd->cfq_group_idle;
+	else