
waterDataDiscreteRateMining/main.c · 朱俊杰 · commit 2021-03-24

朱俊杰 committed 4 years ago
commit 05f4aa0884
1 file changed, 149 additions and 0 deletions

+ 149 - 0
waterDataDiscreteRateMining/main.c

@@ -3376,3 +3376,152 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	cfqq = cfqd->active_queue;
 	if (!cfqq)
 		return false;
+
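+	/*
+	 * Class rules first: an idle-class request never preempts, and
+	 * any request may preempt an idle-class queue.
+	 */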
+	if (cfq_class_idle(new_cfqq))
+		return false;
+
+	if (cfq_class_idle(cfqq))
+		return true;
+
+	/*
+	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+		return false;
+
+	/*
+	 * If the new request is sync but the currently running queue is
+	 * not, let the sync request have priority.
+	 */
+	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
+		return true;
+
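+	/* Preemption never crosses cfq group boundaries. */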
+	if (new_cfqq->cfqg != cfqq->cfqg)
+		return false;
+
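+	/* The active queue has already used up its allotted timeslice. */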
+	if (cfq_slice_used(cfqq))
+		return true;
+
+	/* Allow preemption only if we are idling on the sync-noidle tree */
+	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
+	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
+	    new_cfqq->service_tree->count == 2 &&
+	    RB_EMPTY_ROOT(&cfqq->sort_list))
+		return true;
+
+	/*
+	 * So both queues are sync. Let the new request get disk time if
+	 * it's a metadata request and the current queue is doing regular IO.
+	 */
+	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
+		return true;
+
+	/*
+	 * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+		return true;
+
+	/* The active queue is empty, yet it is not one we should idle on */
+	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
+		return true;
+
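+	/*
+	 * Beyond this point, only preempt if the active queue is idling
+	 * while it waits for a new request.
+	 */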
+	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
+		return false;
+
+	/*
+	 * If this request is as good as one we would expect from the
+	 * current cfqq, let it preempt.
+	 */
+	if (cfq_rq_close(cfqd, cfqq, rq))
+		return true;
+
+	return false;
+}
+
+/*
+ * cfqq preempts the active queue. If we allowed preemption with no slice
+ * left, let it have half of its nominal slice.
+ */
+static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
+
+	cfq_log_cfqq(cfqd, cfqq, "preempt");
+	cfq_slice_expired(cfqd, 1);
+
+	/*
+	 * The workload type changed; don't save the slice, otherwise the
+	 * preemption would not take effect.
+	 */
+	if (old_type != cfqq_type(cfqq))
+		cfqq->cfqg->saved_workload_slice = 0;
+
+	/*
+	 * Put the new queue at the front of the current list,
+	 * so we know that it will be selected next.
+	 */
+	BUG_ON(!cfq_cfqq_on_rr(cfqq));
+
+	cfq_service_tree_add(cfqd, cfqq, 1);
+
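+	/* Reset the slice; a fresh one is assigned when cfqq is served. */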
+	cfqq->slice_end = 0;
+	cfq_mark_cfqq_slice_new(cfqq);
+}
+
+/*
+ * Called when a new fs request (rq) is added (to cfqq). Check if there's
+ * something we should do about it.
+ */
+static void
+cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+		struct request *rq)
+{
+	struct cfq_io_cq *cic = RQ_CIC(rq);
+
+	cfqd->rq_queued++;
+	if (rq->cmd_flags & REQ_PRIO)
+		cfqq->prio_pending++;
+
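+	/* Refresh the per-queue stats that drive idling and preemption. */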
+	cfq_update_io_thinktime(cfqd, cfqq, cic);
+	cfq_update_io_seektime(cfqd, cfqq, rq);
+	cfq_update_idle_window(cfqd, cfqq, cic);
+
+	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+
+	if (cfqq == cfqd->active_queue) {
+		/*
+		 * Remember that we saw a request from this process, but
+		 * don't start queuing just yet. Otherwise we risk seeing lots
+		 * of tiny requests, because we disrupt the normal plugging
+		 * and merging. If the request is already larger than a single
+		 * page, let it rip immediately. For that case we assume that
+		 * merging is already done. Ditto for a busy system that
+		 * has other work pending: don't risk delaying until the
+		 * idle timer unplug to continue working.
+		 */
+		if (cfq_cfqq_wait_request(cfqq)) {
+			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+			    cfqd->busy_queues > 1) {
+				cfq_del_timer(cfqd, cfqq);
+				cfq_clear_cfqq_wait_request(cfqq);
+				__blk_run_queue(cfqd->queue);
+			} else {
+				cfqg_stats_update_idle_time(cfqq->cfqg);
+				cfq_mark_cfqq_must_dispatch(cfqq);
+			}
+		}
+	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
+		/*
+		 * Not the active queue: expire the current slice if it is
+		 * idle and has expired its mean thinktime, or this new queue
+		 * has some old slice time left and is of higher priority, or
+		 * this new queue is RT and the current one is BE.
+		 */
+		cfq_preempt_queue(cfqd, cfqq);
+		__blk_run_queue(cfqd->queue);
+	}
+}
+
+static void cfq_insert_request(struct request_queue *q, struct request *rq)
+{