waterDataFluctuationCorrelation analysisOfLiquidLevelDataOperation.c 薛梅子 commit at 2021-03-05

薛梅子, 4 years ago
Parent commit: 0e0fdfddbb
1 changed file with 167 additions and 0 deletions

+ 167 - 0
waterDataFluctuationCorrelation/fluctuationCorrelationOfSprayEnd/analysisOfLiquidLevelDataOperation.c

@@ -74,3 +74,170 @@ void __blk_queue_free_tags(struct request_queue *q)
 	q->queue_tags = NULL;
 	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
+
+/**
+ * blk_free_tags - release a given set of tag maintenance info
+ * @bqt:	the tag map to free
+ *
+ * Frees the map for an externally managed @bqt.  Callers of this
+ * function must guarantee that all queues that might have been using
+ * this tag map have already been released.
+ */
+void blk_free_tags(struct blk_queue_tag *bqt)
+{
+	if (unlikely(!__blk_free_tags(bqt)))
+		BUG();
+}
+EXPORT_SYMBOL(blk_free_tags);
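+
+/* Reading the check above together with the doc comment: __blk_free_tags()
+ * evidently reports success only when the final reference is dropped, so
+ * blk_free_tags() trips BUG() if some queue is still using the map. */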
+
+/**
+ * blk_queue_free_tags - release tag maintenance info
+ * @q:  the request queue for the device
+ *
+ *  Notes:
+ *	This is used to disable tagged queuing on a device, yet leave
+ *	the queue itself functional.
+ **/
+void blk_queue_free_tags(struct request_queue *q)
+{
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
+}
+EXPORT_SYMBOL(blk_queue_free_tags);
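+
+/* Note: this only clears QUEUE_FLAG_QUEUED; q->queue_tags itself stays
+ * in place and is released later by __blk_queue_free_tags() (see the
+ * context lines at the top of this hunk). */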
+
+static int
+init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
+{
+	struct request **tag_index;
+	unsigned long *tag_map;
+	int nr_ulongs;
+
+	if (q && depth > q->nr_requests * 2) {
+		depth = q->nr_requests * 2;
+		printk(KERN_ERR "%s: adjusted depth to %d\n",
+		       __func__, depth);
+	}
+
+	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+	if (!tag_index)
+		goto fail;
+
+	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
+	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+	if (!tag_map)
+		goto fail;
+
+	tags->real_max_depth = depth;
+	tags->max_depth = depth;
+	tags->tag_index = tag_index;
+	tags->tag_map = tag_map;
+
+	return 0;
+fail:
+	kfree(tag_index);
+	return -ENOMEM;
+}
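+
+/*
+ * Illustrative arithmetic for the bitmap sizing in init_tag_map(),
+ * assuming BITS_PER_LONG == 64: for depth == 100,
+ * ALIGN(100, 64) == 128, so nr_ulongs == 128 / 64 == 2, i.e. two
+ * unsigned longs provide 128 tag bits, of which only the first 100
+ * are ever handed out (max_depth == 100).
+ */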
+
+static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+						   int depth)
+{
+	struct blk_queue_tag *tags;
+
+	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+	if (!tags)
+		goto fail;
+
+	if (init_tag_map(q, tags, depth))
+		goto fail;
+
+	atomic_set(&tags->refcnt, 1);
+	return tags;
+fail:
+	kfree(tags);
+	return NULL;
+}
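+
+/* Note: kfree(NULL) is a no-op, so the single fail label above covers
+ * both failure points: the kmalloc() of tags itself and a failed
+ * init_tag_map() (which frees its own partial allocations). */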
+
+/**
+ * blk_init_tags - initialize the tag info for an external tag map
+ * @depth:	the maximum queue depth supported
+ **/
+struct blk_queue_tag *blk_init_tags(int depth)
+{
+	return __blk_queue_init_tags(NULL, depth);
+}
+EXPORT_SYMBOL(blk_init_tags);
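+
+/*
+ * Hedged usage sketch (illustrative, not part of this change): a driver
+ * sharing one externally managed tag map across several queues might do
+ * roughly the following; the depth of 64 is an arbitrary example value.
+ *
+ *	struct blk_queue_tag *shared;
+ *
+ *	shared = blk_init_tags(64);
+ *	if (!shared)
+ *		return -ENOMEM;
+ *	err = blk_queue_init_tags(q, 64, shared);   (takes a reference)
+ *	...
+ *	blk_free_tags(shared);   (only after every queue released the map)
+ */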
+
+/**
+ * blk_queue_init_tags - initialize the queue tag info
+ * @q:  the request queue for the device
+ * @depth:  the maximum queue depth supported
+ * @tags: the tag map to use, or NULL to allocate a private one
+ *
+ * Queue lock must be held here if the function is called to resize an
+ * existing map.
+ **/
+int blk_queue_init_tags(struct request_queue *q, int depth,
+			struct blk_queue_tag *tags)
+{
+	int rc;
+
+	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
+
+	if (!tags && !q->queue_tags) {
+		tags = __blk_queue_init_tags(q, depth);
+
+		if (!tags)
+			return -ENOMEM;
+
+	} else if (q->queue_tags) {
+		rc = blk_queue_resize_tags(q, depth);
+		if (rc)
+			return rc;
+		queue_flag_set(QUEUE_FLAG_QUEUED, q);
+		return 0;
+	} else
+		atomic_inc(&tags->refcnt);
+
+	/*
+	 * assign it, all done
+	 */
+	q->queue_tags = tags;
+	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
+	INIT_LIST_HEAD(&q->tag_busy_list);
+	return 0;
+}
+EXPORT_SYMBOL(blk_queue_init_tags);
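+
+/*
+ * Hedged usage sketch (illustrative): a driver enabling tagged queuing
+ * on a single queue with a private map passes tags == NULL and lets the
+ * function allocate one; the depth of 32 is an arbitrary example value.
+ *
+ *	int err = blk_queue_init_tags(q, 32, NULL);
+ *	if (err)
+ *		return err;
+ */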
+
+/**
+ * blk_queue_resize_tags - change the queueing depth
+ * @q:  the request queue for the device
+ * @new_depth: the new max command queueing depth
+ *
+ *  Notes:
+ *    Must be called with the queue lock held.
+ **/
+int blk_queue_resize_tags(struct request_queue *q, int new_depth)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+	struct request **tag_index;
+	unsigned long *tag_map;
+	int max_depth, nr_ulongs;
+
+	if (!bqt)
+		return -ENXIO;
+
+	/*
+	 * If real_max_depth is already large enough, just adjust
+	 * max_depth.  *NOTE* requests with tag values between new_depth
+	 * and real_max_depth can be in-flight, so the tag map cannot be
+	 * shrunk blindly here.
+	 */
+	if (new_depth <= bqt->real_max_depth) {
+		bqt->max_depth = new_depth;
+		return 0;
+	}
+
+	/*
+	 * Currently cannot replace a shared tag map with a new
+	 * one, so error out if this is the case
+	 */
+	if (atomic_read(&bqt->refcnt) != 1)