
waterDataFluctuationCorrelation preliminaryDataProcessing.c 张婷 commit at 2020-10-29

张婷 4 years ago
parent revision 353571ee5a

waterDataFluctuationCorrelation/monitoringDataProcessing/preliminaryDataProcessing.c  (+172, -0)

@@ -631,3 +631,175 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	INIT_LIST_HEAD(&q->flush_queue[0]);
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
+	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
+
+	kobject_init(&q->kobj, &blk_queue_ktype);
+
+	mutex_init(&q->sysfs_lock);
+	spin_lock_init(&q->__queue_lock);
+
+	/*
+	 * By default initialize queue_lock to internal lock and driver can
+	 * override it later if need be.
+	 */
+	q->queue_lock = &q->__queue_lock;
+
+	/*
+	 * A queue starts its life with bypass turned on to avoid
+	 * unnecessary bypass on/off overhead and nasty surprises during
+	 * init.  The initial bypass will be finished when the queue is
+	 * registered by blk_register_queue().
+	 */
+	q->bypass_depth = 1;
+	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+	if (blkcg_init_queue(q))
+		goto fail_id;
+
+	return q;
+
+fail_id:
+	ida_simple_remove(&blk_queue_ida, q->id);
+fail_q:
+	kmem_cache_free(blk_requestq_cachep, q);
+	return NULL;
+}
+EXPORT_SYMBOL(blk_alloc_queue_node);
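As an aside, a bio-based driver of this era would take the allocation path above directly and install its own make_request handler instead of going through blk_init_queue(); a minimal sketch, with hypothetical names (sketch_make_request, sketch_alloc):

#include <linux/blkdev.h>
#include <linux/bio.h>

static void sketch_make_request(struct request_queue *q, struct bio *bio)
{
	/* Complete the bio directly, bypassing the request machinery. */
	bio_endio(bio, 0);
}

static struct request_queue *sketch_alloc(void)
{
	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);

	if (!q)
		return NULL;

	blk_queue_make_request(q, sketch_make_request);
	return q;	/* pair with blk_cleanup_queue() on teardown */
}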
+
+/**
+ * blk_init_queue  - prepare a request queue for use with a block device
+ * @rfn:  The function to be called to process requests that have been
+ *        placed on the queue.
+ * @lock: Request queue spin lock
+ *
+ * Description:
+ *    If a block device wishes to use the standard request handling procedures,
+ *    which sorts requests and coalesces adjacent requests, then it must
+ *    call blk_init_queue().  The function @rfn will be called when there
+ *    are requests on the queue that need to be processed.  If the device
+ *    supports plugging, then @rfn may not be called immediately when requests
+ *    are available on the queue, but may be called at some time later instead.
+ *    Plugged queues are generally unplugged when a buffer belonging to one
+ *    of the requests on the queue is needed, or due to memory pressure.
+ *
+ *    @rfn is not required, or even expected, to remove all requests from the
+ *    queue, but only as many as it can handle at a time.  If it does leave
+ *    requests on the queue, it is responsible for arranging that the requests
+ *    get dealt with eventually.
+ *
+ *    The queue spin lock must be held while manipulating the requests on the
+ *    request queue; this lock will also be taken from interrupt context, so irq
+ *    disabling is needed for it.
+ *
+ *    The function returns a pointer to the initialized request queue, or %NULL
+ *    on failure.
+ *
+ * Note:
+ *    blk_init_queue() must be paired with a blk_cleanup_queue() call
+ *    when the block device is deactivated (such as at module unload).
+ **/
+
+struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+{
+	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(blk_init_queue);
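A minimal sketch of the classic pairing the kernel-doc above describes, assuming hypothetical names (sketch_lock, sketch_request):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sketch_lock);

/* Called with sketch_lock held whenever queued requests need service. */
static void sketch_request(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		/* ... service the request ... */
		__blk_end_request_all(rq, 0);
	}
}

static struct request_queue *sketch_setup(void)
{
	/* Must be paired with blk_cleanup_queue() at module unload. */
	return blk_init_queue(sketch_request, &sketch_lock);
}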
+
+struct request_queue *
+blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+{
+	struct request_queue *uninit_q, *q;
+
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+	if (!uninit_q)
+		return NULL;
+
+	q = blk_init_allocated_queue(uninit_q, rfn, lock);
+	if (!q)
+		blk_cleanup_queue(uninit_q);
+
+	return q;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+struct request_queue *
+blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+			 spinlock_t *lock)
+{
+	if (!q)
+		return NULL;
+
+	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+		return NULL;
+
+	q->request_fn		= rfn;
+	q->prep_rq_fn		= NULL;
+	q->unprep_rq_fn		= NULL;
+	q->queue_flags		|= QUEUE_FLAG_DEFAULT;
+
+	/* Override internal queue lock with supplied lock pointer */
+	if (lock)
+		q->queue_lock		= lock;
+
+	/*
+	 * This also sets hw/phys segments, boundary and size
+	 */
+	blk_queue_make_request(q, blk_queue_bio);
+
+	q->sg_reserved_size = INT_MAX;
+
+	/* init elevator */
+	if (elevator_init(q, NULL))
+		return NULL;
+	return q;
+}
+EXPORT_SYMBOL(blk_init_allocated_queue);
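A sketch of the two-phase path this function enables, using a hypothetical helper name; passing a NULL lock keeps the internal __queue_lock installed by blk_alloc_queue_node():

static struct request_queue *sketch_two_phase(request_fn_proc *rfn)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (!q)
		return NULL;

	/* NULL lock: keep the queue's internal __queue_lock. */
	if (!blk_init_allocated_queue(q, rfn, NULL)) {
		blk_cleanup_queue(q);
		return NULL;
	}
	return q;
}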
+
+bool blk_get_queue(struct request_queue *q)
+{
+	if (likely(!blk_queue_dying(q))) {
+		__blk_get_queue(q);
+		return true;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(blk_get_queue);
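A sketch of the reference pattern blk_get_queue() supports, with a hypothetical caller:

static void sketch_inspect(struct request_queue *q)
{
	if (!blk_get_queue(q))
		return;	/* queue is dying; do not touch it */

	/* ... safely use q here ... */

	blk_put_queue(q);	/* drop the reference taken above */
}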
+
+static inline void blk_free_request(struct request_list *rl, struct request *rq)
+{
+	if (rq->cmd_flags & REQ_ELVPRIV) {
+		elv_put_request(rl->q, rq);
+		if (rq->elv.icq)
+			put_io_context(rq->elv.icq->ioc);
+	}
+
+	mempool_free(rq, rl->rq_pool);
+}
+
+/*
+ * ioc_batching returns true if the ioc belongs to a valid batching process
+ * and should be given priority access to a request.
+ */
+static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
+{
+	if (!ioc)
+		return 0;
+
+	/*
+	 * Make sure the process is able to allocate at least 1 request
+	 * even if the batch times out, otherwise we could theoretically
+	 * lose wakeups.
+	 */
+	return ioc->nr_batch_requests == q->nr_batching ||
+		(ioc->nr_batch_requests > 0
+		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
+}
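A user-space model of the predicate above may help; all names and the window value here are hypothetical, and a plain '<' stands in for the wraparound-safe time_before():

#include <stdbool.h>

struct model_ioc {
	int nr_batch_requests;
	unsigned long last_waited;	/* "jiffies" when it last slept */
};

static bool model_batching(const struct model_ioc *ioc, int nr_batching,
			   unsigned long now, unsigned long batch_window)
{
	/* A fresh batcher always passes; afterwards it keeps priority
	 * only while batch requests remain and the window is open. */
	return ioc->nr_batch_requests == nr_batching ||
	       (ioc->nr_batch_requests > 0 &&
		now < ioc->last_waited + batch_window);
}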
+
+/*
+ * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
+ * will cause the process to be a "batcher" on all queues in the system. This
+ * is the behaviour we want though - once it gets a wakeup it should be given
+ * a nice run.
+ */
+static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)