Browse Source

waterDataDiscreteRateMining main.c 朱俊杰 commit at 2021-03-05

朱俊杰 4 years ago
parent
current commit
f0d5424194
1 file changed, 135 insertions, 0 deletions

+ 135 - 0
waterDataDiscreteRateMining/main.c

@@ -2728,3 +2728,138 @@ check_group_idle:
 	}
 
 expire:
+	cfq_slice_expired(cfqd, 0);
+new_queue:
+	/*
+	 * Current queue expired. Check if we have to switch to a new
+	 * service tree
+	 */
+	if (!new_cfqq)
+		cfq_choose_cfqg(cfqd);
+
+	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
+keep_queue:
+	return cfqq;
+}
+
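+/*
+ * Drain a single queue: move every remaining request on @cfqq to the
+ * dispatch list, then expire the queue explicitly, since an empty cfqq
+ * is not expired by default.
+ */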
+static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
+{
+	int dispatched = 0;
+
+	while (cfqq->next_rq) {
+		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
+		dispatched++;
+	}
+
+	BUG_ON(!list_empty(&cfqq->fifo));
+
+	/* By default cfqq is not expired if it is empty. Do it explicitly */
+	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
+	return dispatched;
+}
+
+/*
+ * Drain our current requests. Used for barriers and when switching
+ * io schedulers on-the-fly.
+ */
+static int cfq_forced_dispatch(struct cfq_data *cfqd)
+{
+	struct cfq_queue *cfqq;
+	int dispatched = 0;
+
+	/* Expire the timeslice of the current active queue first */
+	cfq_slice_expired(cfqd, 0);
+	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
+		__cfq_set_active_queue(cfqd, cfqq);
+		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
+	}
+
+	BUG_ON(cfqd->busy_queues);
+
+	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
+	return dispatched;
+}
+
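+/*
+ * Estimate whether cfqq will exhaust its time slice before its
+ * already-dispatched requests complete, assuming each in-flight
+ * request costs roughly cfq_slice_idle jiffies to service.
+ */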
+static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
+	struct cfq_queue *cfqq)
+{
+	/* the queue hasn't finished any request, can't estimate */
+	if (cfq_cfqq_slice_new(cfqq))
+		return true;
+	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
+		cfqq->slice_end))
+		return true;
+
+	return false;
+}
+
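+/*
+ * Decide whether cfqq may dispatch another request now: apply the
+ * drain rules for mixing sync and async IO first, then the per-queue
+ * dispatch depth limit.
+ */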
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	unsigned int max_dispatch;
+
+	/*
+	 * Drain async requests before we start sync IO
+	 */
+	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
+		return false;
+
+	/*
+	 * If this is an async queue and we have sync IO in flight, let it wait
+	 */
+	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
+		return false;
+
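+	/* Base dispatch depth: half of cfq_quantum, but at least one request */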
+	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
+	if (cfq_class_idle(cfqq))
+		max_dispatch = 1;
+
+	/*
+	 * Does this cfqq already have too much IO in flight?
+	 */
+	if (cfqq->dispatched >= max_dispatch) {
+		bool promote_sync = false;
+		/*
+		 * idle queue must always only have a single IO in flight
+		 */
+		if (cfq_class_idle(cfqq))
+			return false;
+
+		/*
+		 * If there is only one sync queue, we can ignore the async
+		 * queues here and give the sync queue no dispatch limit,
+		 * because a sync queue can preempt an async queue, so
+		 * limiting the sync queue doesn't make sense. This is
+		 * useful for the aiostress test.
+		 */
+		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
+			promote_sync = true;
+
+		/*
+		 * We have other queues, don't allow more IO from this one
+		 */
+		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
+				!promote_sync)
+			return false;
+
+		/*
+		 * Sole queue user, no limit
+		 */
+		if (cfqd->busy_queues == 1 || promote_sync)
+			max_dispatch = -1;	/* unsigned: wraps to UINT_MAX, no limit */
+		else
+			/*
+			 * Normally we start throttling cfqq when cfq_quantum/2
+			 * requests have been dispatched. But we can drive
+			 * deeper queue depths at the beginning of the slice,
+			 * subject to the upper limit of cfq_quantum.
+			 */
+			max_dispatch = cfqd->cfq_quantum;
+	}
+
+	/*
+	 * Async queues must wait a bit before being allowed to dispatch.
+	 * We also ramp up the dispatch depth gradually for async IO,
+	 * based on the last sync IO we serviced.
+	 */
+	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;