@@ -373,3 +373,104 @@ enum cfqq_state_flags {
         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
         CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
         CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
+        CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
+        CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
+        CFQ_CFQQ_FLAG_sync, /* synchronous queue */
+        CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
+        CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be split */
+        CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
+        CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
+};
+
+#define CFQ_CFQQ_FNS(name) \
+static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
+{ \
+        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
+} \
+static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
+{ \
+        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
+} \
+static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
+{ \
+        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
+}
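For reference (not part of the patch), a single invocation of CFQ_CFQQ_FNS() expands into three trivial accessors over cfqq->flags; shown here for idle_window, with whitespace adjusted:

static inline void cfq_mark_cfqq_idle_window(struct cfq_queue *cfqq)
{
        /* set the per-queue "slice idling enabled" bit */
        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_idle_window);
}
static inline void cfq_clear_cfqq_idle_window(struct cfq_queue *cfqq)
{
        /* clear the bit */
        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_idle_window);
}
static inline int cfq_cfqq_idle_window(const struct cfq_queue *cfqq)
{
        /* test the bit; returns 0 or 1 */
        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_idle_window)) != 0;
}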
+
+CFQ_CFQQ_FNS(on_rr);
+CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_dispatch);
+CFQ_CFQQ_FNS(must_alloc_slice);
+CFQ_CFQQ_FNS(fifo_expire);
+CFQ_CFQQ_FNS(idle_window);
+CFQ_CFQQ_FNS(prio_changed);
+CFQ_CFQQ_FNS(slice_new);
+CFQ_CFQQ_FNS(sync);
+CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(split_coop);
+CFQ_CFQQ_FNS(deep);
+CFQ_CFQQ_FNS(wait_busy);
+#undef CFQ_CFQQ_FNS
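A minimal sketch of how the generated accessors are meant to be called; the helper below is illustrative only and does not appear in this patch:

/* Illustrative caller, not part of the patch. */
static void example_prepare_new_slice(struct cfq_queue *cfqq)
{
        cfq_mark_cfqq_slice_new(cfqq);    /* nothing dispatched in this slice yet */
        cfq_clear_cfqq_fifo_expire(cfqq); /* FIFO not yet checked in this slice */

        if (cfq_cfqq_sync(cfqq))          /* predicate form: non-zero if the bit is set */
                cfq_mark_cfqq_idle_window(cfqq);
}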
+
+static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
+{
+        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
+}
+
+static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
+{
+        return pd_to_blkg(&cfqg->pd);
+}
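pd_to_cfqg() recovers the cfq_group that embeds a given blkg_policy_data, and cfqg_to_blkg() walks from the group to its owning blkcg_gq. The reverse lookup follows the same pattern; a sketch, assuming CFQ's policy descriptor is named blkcg_policy_cfq and that blkg_to_pd() is available from blk-cgroup.h:

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
        /* Fetch CFQ's per-policy data for this group queue, then recover
         * the enclosing cfq_group; NULL if the policy is not attached. */
        return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}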
+
+#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+
+/* cfqg stats flags */
+enum cfqg_stats_flags {
+        CFQG_stats_waiting = 0,
+        CFQG_stats_idling,
+        CFQG_stats_empty,
+};
+
+#define CFQG_FLAG_FNS(name) \
+static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
+{ \
+        stats->flags |= (1 << CFQG_stats_##name); \
+} \
+static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
+{ \
+        stats->flags &= ~(1 << CFQG_stats_##name); \
+} \
+static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
+{ \
+        return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
+} \
+
+CFQG_FLAG_FNS(waiting)
+CFQG_FLAG_FNS(idling)
+CFQG_FLAG_FNS(empty)
+#undef CFQG_FLAG_FNS
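As with CFQ_CFQQ_FNS above, each CFQG_FLAG_FNS() invocation stamps out mark/clear/test helpers, this time over the flags word of struct cfqg_stats. They act as latches around the timed statistics that follow, so a running clock is never started or accounted twice. A sketch of the start side for the idling flag; the start_idle_time field is an assumption here (mirroring start_group_wait_time used below), and the function name is illustrative:

/* Illustrative only: begin an idling measurement if one is not already running. */
static void example_start_idling(struct cfqg_stats *stats)
{
        if (cfqg_stats_idling(stats))
                return;                         /* clock already running */

        stats->start_idle_time = sched_clock(); /* assumed field, see above */
        cfqg_stats_mark_idling(stats);
}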
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
+{
+        unsigned long long now;
+
+        if (!cfqg_stats_waiting(stats))
+                return;
+
+        now = sched_clock();
+        if (time_after64(now, stats->start_group_wait_time))
+                blkg_stat_add(&stats->group_wait_time,
+                              now - stats->start_group_wait_time);
+        cfqg_stats_clear_waiting(stats);
+}
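Together with cfqg_stats_set_start_group_wait_time() below, this brackets the interval during which a group has queued work but is not the group currently being serviced: the setter records a sched_clock() timestamp and latches the waiting flag, while this function folds the elapsed time into the group_wait_time counter and releases the latch (the time_after64() check skips the update if the clock appears to have gone backwards). A sketch of the intended call pattern; the caller names are illustrative, not taken from this patch:

/* Illustrative callers, not part of the patch. */
static void example_group_backlogged(struct cfq_group *cfqg,
                                     struct cfq_group *curr_cfqg)
{
        /* A request was queued on cfqg while curr_cfqg owns the device:
         * start the wait clock (a no-op if it is already running). */
        cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static void example_group_selected(struct cfq_group *cfqg)
{
        /* cfqg becomes the active group: account the time it spent waiting. */
        cfqg_stats_update_group_wait_time(&cfqg->stats);
}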
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
+                                                 struct cfq_group *curr_cfqg)
+{
+        struct cfqg_stats *stats = &cfqg->stats;
+
+        if (cfqg_stats_waiting(stats))
+                return;
+        if (cfqg == curr_cfqg)
+                return;
|