waterDataDiscreteRateMining main.c 朱俊杰 commit at 2021-04-01

朱俊杰, 4 years ago
commit 6b2a78fd20
1 file changed with 138 additions and 0 deletions

waterDataDiscreteRateMining/main.c  +138 -0

@@ -3747,3 +3747,141 @@ static void cfq_put_request(struct request *rq)
 	if (cfqq) {
 		const int rw = rq_data_dir(rq);
 
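+		/* Undo the per-direction allocation taken in cfq_set_request() */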
+		BUG_ON(!cfqq->allocated[rw]);
+		cfqq->allocated[rw]--;
+
+		/* Put down rq reference on cfqg */
+		cfqg_put(RQ_CFQG(rq));
+		rq->elv.priv[0] = NULL;
+		rq->elv.priv[1] = NULL;
+
+		cfq_put_queue(cfqq);
+	}
+}
+
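+/*
+ * Move the io context over to the cooperating queue this one is scheduled
+ * to merge with, then drop the reference on the old queue.
+ */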
+static struct cfq_queue *
+cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
+		struct cfq_queue *cfqq)
+{
+	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
+	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
+	cfq_mark_cfqq_coop(cfqq->new_cfqq);
+	cfq_put_queue(cfqq);
+	return cic_to_cfqq(cic, 1);
+}
+
+/*
+ * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
+ * was the last process referring to said cfqq.
+ */
+static struct cfq_queue *
+split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
+{
+	if (cfqq_process_refs(cfqq) == 1) {
+		cfqq->pid = current->pid;
+		cfq_clear_cfqq_coop(cfqq);
+		cfq_clear_cfqq_split_coop(cfqq);
+		return cfqq;
+	}
+
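+	/* Still shared with other processes: detach the io context and drop our refs */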
+	cic_set_cfqq(cic, NULL, 1);
+
+	cfq_put_cooperator(cfqq);
+
+	cfq_put_queue(cfqq);
+	return NULL;
+}
+
+/*
+ * Allocate cfq data structures associated with this request.
+ */
+static int
+cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
+		gfp_t gfp_mask)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
+	const int rw = rq_data_dir(rq);
+	const bool is_sync = rq_is_sync(rq);
+	struct cfq_queue *cfqq;
+
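+	/* Queue allocation below may sleep when __GFP_WAIT is set */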
+	might_sleep_if(gfp_mask & __GFP_WAIT);
+
+	spin_lock_irq(q->queue_lock);
+
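+	/* The task's io priority or blkcg may have changed since the last request */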
+	check_ioprio_changed(cic, bio);
+	check_blkcg_changed(cic, bio);
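+
+	/* Restarted from below when split_cfqq() tears down the old queue */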
+new_queue:
+	cfqq = cic_to_cfqq(cic, is_sync);
+	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
+		cic_set_cfqq(cic, cfqq, is_sync);
+	} else {
+		/*
+		 * If the queue was seeky for too long, break it apart.
+		 */
+		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
+			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
+			cfqq = split_cfqq(cic, cfqq);
+			if (!cfqq)
+				goto new_queue;
+		}
+
+		/*
+		 * Check to see if this queue is scheduled to merge with
+		 * another, closely cooperating queue.  The merging of
+		 * queues happens here as it must be done in process context.
+ * The reference on new_cfqq was taken when the merge was scheduled.
+		 */
+		if (cfqq->new_cfqq)
+			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
+	}
+
+	cfqq->allocated[rw]++;
+
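+	/* The request pins both the cfqq and its cfqg; dropped in cfq_put_request() */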
+	cfqq->ref++;
+	cfqg_get(cfqq->cfqg);
+	rq->elv.priv[0] = cfqq;
+	rq->elv.priv[1] = cfqq->cfqg;
+	spin_unlock_irq(q->queue_lock);
+	return 0;
+}
+
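+/*
+ * Work handler for cfqd->unplug_work: restart dispatching on the request
+ * queue from process context with the queue lock held.
+ */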
+static void cfq_kick_queue(struct work_struct *work)
+{
+	struct cfq_data *cfqd =
+		container_of(work, struct cfq_data, unplug_work);
+	struct request_queue *q = cfqd->queue;
+
+	spin_lock_irq(q->queue_lock);
+	__blk_run_queue(cfqd->queue);
+	spin_unlock_irq(q->queue_lock);
+}
+
+/*
+ * Timer running if the active_queue is currently idling inside its time slice
+ */
+static void cfq_idle_slice_timer(unsigned long data)
+{
+	struct cfq_data *cfqd = (struct cfq_data *) data;
+	struct cfq_queue *cfqq;
+	unsigned long flags;
+	int timed_out = 1;
+
+	cfq_log(cfqd, "idle timer fired");
+
+	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+	cfqq = cfqd->active_queue;
+	if (cfqq) {
+		timed_out = 0;
+
+		/*
+		 * We saw a request before the queue expired, let it through
+		 */
+		if (cfq_cfqq_must_dispatch(cfqq))
+			goto out_kick;
+
+		/*
+		 * expired
+		 */
+		if (cfq_slice_used(cfqq))