
waterDataFluctuationCorrelation preliminaryDataProcessing.c 张婷 commit at 2020-11-17

张婷 committed 4 years ago · parent commit 93505d4f0b
+175 -0   waterDataFluctuationCorrelation/monitoringDataProcessing/preliminaryDataProcessing.c

@@ -803,3 +803,178 @@ static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
  * a nice run.
  */
 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
+{
+	if (!ioc || ioc_batching(q, ioc))
+		return;
+
+	ioc->nr_batch_requests = q->nr_batching;
+	ioc->last_waited = jiffies;
+}
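+
+/*
+ * For context: ioc_batching(), whose signature is visible in the hunk
+ * header above, is the consumer of the two fields set here.  In mainline
+ * blk-core.c of this generation it reads roughly as follows (paraphrased
+ * sketch, for illustration only):
+ *
+ *	static inline int ioc_batching(struct request_queue *q,
+ *				       struct io_context *ioc)
+ *	{
+ *		if (!ioc)
+ *			return 0;
+ *
+ *		return ioc->nr_batch_requests == q->nr_batching ||
+ *			(ioc->nr_batch_requests > 0 &&
+ *			 time_before(jiffies,
+ *				     ioc->last_waited + BLK_BATCH_TIME));
+ *	}
+ */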
+
+static void __freed_request(struct request_list *rl, int sync)
+{
+	struct request_queue *q = rl->q;
+
+	/*
+	 * bdi isn't aware of blkcg yet.  As all async IOs end up root
+	 * blkcg anyway, just use root blkcg state.
+	 */
+	if (rl == &q->root_rl &&
+	    rl->count[sync] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, sync);
+
+	if (rl->count[sync] + 1 <= q->nr_requests) {
+		if (waitqueue_active(&rl->wait[sync]))
+			wake_up(&rl->wait[sync]);
+
+		blk_clear_rl_full(rl, sync);
+	}
+}
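+
+/*
+ * The tasks woken by the wake_up() above sleep in get_request(), later in
+ * this file.  A paraphrased sketch of that wait/retry loop in mainline
+ * blk-core.c of this generation (for illustration only):
+ *
+ *	DEFINE_WAIT(wait);
+ *	...
+ *	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+ *				  TASK_UNINTERRUPTIBLE);
+ *	spin_unlock_irq(q->queue_lock);
+ *	io_schedule();
+ *
+ *	(after waking, the task is marked as a "batcher" and retries)
+ *	ioc_set_batching(q, current->io_context);
+ *	spin_lock_irq(q->queue_lock);
+ *	finish_wait(&rl->wait[is_sync], &wait);
+ *	goto retry;
+ */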
+
+/*
+ * A request has just been released.  Account for it, update the full and
+ * congestion status, wake up any waiters.  Called under q->queue_lock.
+ */
+static void freed_request(struct request_list *rl, unsigned int flags)
+{
+	struct request_queue *q = rl->q;
+	int sync = rw_is_sync(flags);
+
+	q->nr_rqs[sync]--;
+	rl->count[sync]--;
+	if (flags & REQ_ELVPRIV)
+		q->nr_rqs_elvpriv--;
+
+	__freed_request(rl, sync);
+
+	if (unlikely(rl->starved[sync ^ 1]))
+		__freed_request(rl, sync ^ 1);
+}
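+
+/*
+ * The starved[] flag consumed above is set on the allocation side when a
+ * request cannot be obtained while that direction's count is zero; in
+ * mainline blk-core.c this is the rq_starved label that __get_request()
+ * below jumps to (paraphrased sketch):
+ *
+ *	rq_starved:
+ *		if (unlikely(rl->count[is_sync] == 0))
+ *			rl->starved[is_sync] = 1;
+ */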
+
+/*
+ * Determine if elevator data should be initialized when allocating the
+ * request associated with @bio.
+ */
+static bool blk_rq_should_init_elevator(struct bio *bio)
+{
+	if (!bio)
+		return true;
+
+	/*
+	 * Flush requests do not use the elevator so skip initialization.
+	 * This allows a request to share the flush and elevator data.
+	 */
+	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
+		return false;
+
+	return true;
+}
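+
+/*
+ * Example of a bio that takes the false branch above: a flush/FUA write,
+ * as a hypothetical submitter might issue it with this generation's
+ * interfaces:
+ *
+ *	submit_bio(WRITE_FLUSH_FUA, bio);
+ *
+ * which sets REQ_FLUSH|REQ_FUA in bio->bi_rw.  Such bios are steered
+ * through the flush machinery instead of the elevator, which is why the
+ * request can share space between flush and elevator data.
+ */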
+
+/**
+ * rq_ioc - determine io_context for request allocation
+ * @bio: request being allocated is for this bio (can be %NULL)
+ *
+ * Determine io_context to use for request allocation for @bio.  May return
+ * %NULL if %current->io_context doesn't exist.
+ */
+static struct io_context *rq_ioc(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+	if (bio && bio->bi_ioc)
+		return bio->bi_ioc;
+#endif
+	return current->io_context;
+}
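+
+/*
+ * bio->bi_ioc is only set when a submitter explicitly associates a bio
+ * with a task's context; under CONFIG_BLK_CGROUP this is done with
+ * bio_associate_current().  Hypothetical submitter sketch:
+ *
+ *	bio_associate_current(bio);	(sets bio->bi_ioc and bio->bi_css)
+ *	submit_bio(rw, bio);
+ *
+ * so the request is charged to that task even if the bio is actually
+ * issued later from a different context.
+ */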
+
+/**
+ * __get_request - get a free request
+ * @rl: request list to allocate from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
+ *
+ * Get a free request from @rl's queue.  This function may fail under memory
+ * pressure or if the queue is dead.
+ *
+ * Must be called with @q->queue_lock held.
+ * Returns %NULL on failure, with @q->queue_lock still held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
+ */
+static struct request *__get_request(struct request_list *rl, int rw_flags,
+				     struct bio *bio, gfp_t gfp_mask)
+{
+	struct request_queue *q = rl->q;
+	struct request *rq;
+	struct elevator_type *et = q->elevator->type;
+	struct io_context *ioc = rq_ioc(bio);
+	struct io_cq *icq = NULL;
+	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	int may_queue;
+
+	if (unlikely(blk_queue_dying(q)))
+		return NULL;
+
+	may_queue = elv_may_queue(q, rw_flags);
+	if (may_queue == ELV_MQUEUE_NO)
+		goto rq_starved;
+
+	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
+		if (rl->count[is_sync]+1 >= q->nr_requests) {
+			/*
+			 * The queue will fill after this allocation, so set
+			 * it as full, and mark this process as "batching".
+			 * This process will be allowed to complete a batch of
+			 * requests, others will be blocked.
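+			 * (A batch is q->nr_batching requests; see
+			 * ioc_batching()/ioc_set_batching() above for
+			 * the accounting and the time window.)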
+			 */
+			if (!blk_rl_full(rl, is_sync)) {
+				ioc_set_batching(q, ioc);
+				blk_set_rl_full(rl, is_sync);
+			} else {
+				if (may_queue != ELV_MQUEUE_MUST
+						&& !ioc_batching(q, ioc)) {
+					/*
+					 * The queue is full and the allocating
+					 * process is not a "batcher", and not
+					 * exempted by the IO scheduler
+					 */
+					return NULL;
+				}
+			}
+		}
+		/*
+		 * bdi isn't aware of blkcg yet.  As all async IOs end up
+		 * root blkcg anyway, just use root blkcg state.
+		 */
+		if (rl == &q->root_rl)
+			blk_set_queue_congested(q, is_sync);
+	}
+
+	/*
+	 * Only allow batching queuers to allocate up to 50% over the defined
+	 * limit of requests, otherwise we could have thousands of requests
+	 * allocated with any setting of ->nr_requests
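+	 * (e.g. with the default nr_requests of 128, the hard cap below
+	 * works out to 3 * 128 / 2 = 192 requests per direction).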
+	 */
+	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
+		return NULL;
+
+	q->nr_rqs[is_sync]++;
+	rl->count[is_sync]++;
+	rl->starved[is_sync] = 0;
+
+	/*
+	 * Decide whether the new request will be managed by elevator.  If
+	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
+	 * prevent the current elevator from being destroyed until the new
+	 * request is freed.  This guarantees icq's won't be destroyed and
+	 * makes creating new ones safe.
+	 *
+	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
+	 * it will be created after releasing queue_lock.
+	 */
+	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
+		rw_flags |= REQ_ELVPRIV;
+		q->nr_rqs_elvpriv++;
+		if (et->icq_cache && ioc)
+			icq = ioc_lookup_icq(ioc, q);
+	}
+
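+	/*
+	 * (The hunk continues beyond this excerpt.  In mainline blk-core.c
+	 * of this generation the function goes on to drop @q->queue_lock,
+	 * allocate the request from @rl->rq_pool, and create a missing icq
+	 * roughly as:
+	 *
+	 *	if (unlikely(et->icq_cache && !icq))
+	 *		icq = ioc_create_icq(ioc, q, gfp_mask);
+	 *
+	 * before attaching it through rq->elv.icq.)
+	 */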