
waterDataFluctuationCorrelation analysisOfTheCausesOfTheFluctuation.c 张婷 commit at 2020-09-10

张婷, 4 years ago
Parent
Current commit: 3fa57427e0

+ 178 - 0
waterDataFluctuationCorrelation/fluctuationCorrelationOfSprayEnd/analysisOfTheCausesOfTheFluctuation.c

@@ -142,3 +142,181 @@ static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
 {
 	return blkg_to_tg(td->queue->root_blkg);
 }
+
+enum tg_state_flags {
+	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
+};
+
+#define THROTL_TG_FNS(name)						\
+static inline void throtl_mark_tg_##name(struct throtl_grp *tg)		\
+{									\
+	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
+}									\
+static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
+{									\
+	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
+}									\
+static inline int throtl_tg_##name(const struct throtl_grp *tg)		\
+{									\
+	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
+}
+
+THROTL_TG_FNS(on_rr);
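Aside (not part of the diff): the single THROTL_TG_FNS(on_rr) invocation above generates three inline helpers for setting, clearing and testing the on_rr bit. A sketch of what the preprocessor expands it to:

/* expansion of THROTL_TG_FNS(on_rr), shown for illustration */
static inline void throtl_mark_tg_on_rr(struct throtl_grp *tg)
{
	(tg)->flags |= (1 << THROTL_TG_FLAG_on_rr);
}
static inline void throtl_clear_tg_on_rr(struct throtl_grp *tg)
{
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_on_rr);
}
static inline int throtl_tg_on_rr(const struct throtl_grp *tg)
{
	return ((tg)->flags & (1 << THROTL_TG_FLAG_on_rr)) != 0;
}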
+
+#define throtl_log_tg(td, tg, fmt, args...)	do {			\
+	char __pbuf[128];						\
+									\
+	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
+	blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
+} while (0)
+
+#define throtl_log(td, fmt, args...)	\
+	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
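Aside (not part of the diff): throtl_log_tg prefixes each blktrace message with the cgroup path of the group (via blkg_path), while throtl_log only tags the queue. Hypothetical call sites, assuming td, tg and a direction rw are in scope:

/* hypothetical call sites, for illustration only */
throtl_log(td, "limits changed");
throtl_log_tg(td, tg, "[%c] new bps=%llu",
	      rw == READ ? 'R' : 'W', tg->bps[rw]);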
+
+static inline unsigned int total_nr_queued(struct throtl_data *td)
+{
+	return td->nr_queued[0] + td->nr_queued[1];
+}
+
+/*
+ * Worker for allocating per cpu stat for tgs. This is scheduled on the
+ * system_wq once there are some groups on the alloc_list waiting for
+ * allocation.
+ */
+static void tg_stats_alloc_fn(struct work_struct *work)
+{
+	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
+	struct delayed_work *dwork = to_delayed_work(work);
+	bool empty = false;
+
+alloc_stats:
+	if (!stats_cpu) {
+		stats_cpu = alloc_percpu(struct tg_stats_cpu);
+		if (!stats_cpu) {
+			/* allocation failed, try again after some time */
+			schedule_delayed_work(dwork, msecs_to_jiffies(10));
+			return;
+		}
+	}
+
+	spin_lock_irq(&tg_stats_alloc_lock);
+
+	if (!list_empty(&tg_stats_alloc_list)) {
+		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
+							 struct throtl_grp,
+							 stats_alloc_node);
+		swap(tg->stats_cpu, stats_cpu);
+		list_del_init(&tg->stats_alloc_node);
+	}
+
+	empty = list_empty(&tg_stats_alloc_list);
+	spin_unlock_irq(&tg_stats_alloc_lock);
+	if (!empty)
+		goto alloc_stats;
+}
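Aside (not part of the diff): tg_stats_alloc_fn and throtl_pd_init below rely on a few per-file statics that sit outside this hunk. A sketch of the declarations they assume (the exact definitions are not shown in this commit):

/* assumed declarations, defined earlier in the file (outside this hunk) */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);	/* protects tg_stats_alloc_list */
static LIST_HEAD(tg_stats_alloc_list);		/* tgs waiting for per-cpu stats */
static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

The allocation itself happens outside the spinlock (alloc_percpu may sleep), and the result is handed to the first waiting group under the lock via swap(); the goto loop keeps running until the list drains, or an allocation fails and the work is rescheduled.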
+
+static void throtl_pd_init(struct blkcg_gq *blkg)
+{
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
+
+	RB_CLEAR_NODE(&tg->rb_node);
+	bio_list_init(&tg->bio_lists[0]);
+	bio_list_init(&tg->bio_lists[1]);
+	tg->limits_changed = false;
+
+	tg->bps[READ] = -1;
+	tg->bps[WRITE] = -1;
+	tg->iops[READ] = -1;
+	tg->iops[WRITE] = -1;
+
+	/*
+	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
+	 * but percpu allocator can't be called from IO path.  Queue tg on
+	 * tg_stats_alloc_list and allocate from work item.
+	 */
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
+	schedule_delayed_work(&tg_stats_alloc_work, 0);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
+}
+
+static void throtl_pd_exit(struct blkcg_gq *blkg)
+{
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
+
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+	list_del_init(&tg->stats_alloc_node);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
+
+	free_percpu(tg->stats_cpu);
+}
+
+static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
+{
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+	int cpu;
+
+	if (tg->stats_cpu == NULL)
+		return;
+
+	for_each_possible_cpu(cpu) {
+		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
+
+		blkg_rwstat_reset(&sc->service_bytes);
+		blkg_rwstat_reset(&sc->serviced);
+	}
+}
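Aside (not part of the diff): the per-cpu counters reset here are defined outside this hunk; a sketch of the layout this code assumes:

/* assumed per-cpu stats layout (defined elsewhere in the file) */
struct tg_stats_cpu {
	struct blkg_rwstat	service_bytes;	/* total bytes serviced */
	struct blkg_rwstat	serviced;	/* total IOs serviced, post merge */
};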
+
+static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
+					   struct blkcg *blkcg)
+{
+	/*
+	 * This is the common case when there are no blkcgs.  Avoid lookup
+	 * in this case
+	 */
+	if (blkcg == &blkcg_root)
+		return td_root_tg(td);
+
+	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
+}
+
+static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
+						  struct blkcg *blkcg)
+{
+	struct request_queue *q = td->queue;
+	struct throtl_grp *tg = NULL;
+
+	/*
+	 * This is the common case when there are no blkcgs.  Avoid lookup
+	 * in this case
+	 */
+	if (blkcg == &blkcg_root) {
+		tg = td_root_tg(td);
+	} else {
+		struct blkcg_gq *blkg;
+
+		blkg = blkg_lookup_create(blkcg, q);
+
+		/* if %NULL and @q is alive, fall back to root_tg */
+		if (!IS_ERR(blkg))
+			tg = blkg_to_tg(blkg);
+		else if (!blk_queue_dying(q))
+			tg = td_root_tg(td);
+	}
+
+	return tg;
+}
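Aside (not part of the diff): a hypothetical caller sketch showing how the two lookup helpers would typically be paired on the bio submission path; bio, q, td, blkcg and tg are assumed to be in scope, and the locking shown is illustrative:

/* hypothetical caller, for illustration only */
rcu_read_lock();
blkcg = bio_blkcg(bio);			/* blkcg of the submitting bio */
tg = throtl_lookup_tg(td, blkcg);	/* fast path: no group creation */
if (tg) {
	/* group already exists; limits can be checked without queue_lock */
}

spin_lock_irq(q->queue_lock);
tg = throtl_lookup_create_tg(td, blkcg);	/* may create, or fall back to root */
spin_unlock_irq(q->queue_lock);
rcu_read_unlock();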
+
+static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
+{
+	/* Service tree is empty */
+	if (!root->count)
+		return NULL;
+
+	if (!root->left)
+		root->left = rb_first(&root->rb);
+
+	if (root->left)
+		return rb_entry_tg(root->left);
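Aside (not part of the diff): throtl_rb_first walks a small service-tree wrapper that caches the leftmost (earliest-to-dispatch) node so the next group to serve can be found without rescanning the rbtree. The wrapper is defined outside this hunk; a sketch of the assumed layout:

/* assumed service-tree wrapper (defined elsewhere in the file) */
struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;		/* cached leftmost node */
	unsigned int count;		/* number of groups on the tree */
	unsigned long min_disptime;	/* earliest dispatch time in the tree */
};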