
efHotAgingTrendMining/main.c (朱俊杰, commit at 2020-10-21)

朱俊杰 4 years ago
Parent commit: d7e57efc52
1 changed file with 199 additions and 0 deletions

+ 199 - 0
efHotAgingTrendMining/main.c

@@ -0,0 +1,199 @@
+/*
+ * bsg.c - block layer implementation of the sg v4 interface
+ *
+ * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
+ * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License version 2.  See the file "COPYING" in the main directory of this
+ *  archive for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/blkdev.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/jiffies.h>
+#include <linux/percpu.h>
+#include <linux/uio.h>
+#include <linux/idr.h>
+#include <linux/bsg.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/sg.h>
+
+#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
+#define BSG_VERSION	"0.4"
+
+struct bsg_device {
+	struct request_queue *queue;
+	spinlock_t lock;
+	struct list_head busy_list;
+	struct list_head done_list;
+	struct hlist_node dev_list;
+	atomic_t ref_count;
+	int queued_cmds;
+	int done_cmds;
+	wait_queue_head_t wq_done;
+	wait_queue_head_t wq_free;
+	char name[20];
+	int max_queue;
+	unsigned long flags;
+};
+
+enum {
+	BSG_F_BLOCK		= 1,
+};
+
+#define BSG_DEFAULT_CMDS	64
+#define BSG_MAX_DEVS		32768
+
+#undef BSG_DEBUG
+
+#ifdef BSG_DEBUG
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
+#else
+#define dprintk(fmt, args...)
+#endif
+
+static DEFINE_MUTEX(bsg_mutex);
+static DEFINE_IDR(bsg_minor_idr);
+
+#define BSG_LIST_ARRAY_SIZE	8
+static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
+
+static struct class *bsg_class;
+static int bsg_major;
+
+static struct kmem_cache *bsg_cmd_cachep;
+
+/*
+ * our internal command type
+ */
+struct bsg_command {
+	struct bsg_device *bd;
+	struct list_head list;
+	struct request *rq;
+	struct bio *bio;
+	struct bio *bidi_bio;
+	int err;
+	struct sg_io_v4 hdr;
+	char sense[SCSI_SENSE_BUFFERSIZE];
+};
+
+static void bsg_free_command(struct bsg_command *bc)
+{
+	struct bsg_device *bd = bc->bd;
+	unsigned long flags;
+
+	kmem_cache_free(bsg_cmd_cachep, bc);
+
+	spin_lock_irqsave(&bd->lock, flags);
+	bd->queued_cmds--;
+	spin_unlock_irqrestore(&bd->lock, flags);
+
+	wake_up(&bd->wq_free);
+}
+
+static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
+{
+	struct bsg_command *bc = ERR_PTR(-EINVAL);
+
+	spin_lock_irq(&bd->lock);
+
+	if (bd->queued_cmds >= bd->max_queue)
+		goto out;
+
+	bd->queued_cmds++;
+	spin_unlock_irq(&bd->lock);
+
+	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
+	if (unlikely(!bc)) {
+		spin_lock_irq(&bd->lock);
+		bd->queued_cmds--;
+		bc = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	bc->bd = bd;
+	INIT_LIST_HEAD(&bc->list);
+	dprintk("%s: returning free cmd %p\n", bd->name, bc);
+	return bc;
+out:
+	spin_unlock_irq(&bd->lock);
+	return bc;
+}
+
+static inline struct hlist_head *bsg_dev_idx_hash(int index)
+{
+	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
+}
+
+static int bsg_io_schedule(struct bsg_device *bd)
+{
+	DEFINE_WAIT(wait);
+	int ret = 0;
+
+	spin_lock_irq(&bd->lock);
+
+	BUG_ON(bd->done_cmds > bd->queued_cmds);
+
+	/*
+	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
+	 * work to do", even though we return -ENOSPC after this same test
+	 * during bsg_write() -- there, it means our buffer can't have more
+	 * bsg_commands added to it, thus has no space left.
+	 */
+	if (bd->done_cmds == bd->queued_cmds) {
+		ret = -ENODATA;
+		goto unlock;
+	}
+
+	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
+		ret = -EAGAIN;
+		goto unlock;
+	}
+
+	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
+	spin_unlock_irq(&bd->lock);
+	io_schedule();
+	finish_wait(&bd->wq_done, &wait);
+
+	return ret;
+unlock:
+	spin_unlock_irq(&bd->lock);
+	return ret;
+}
+
+static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
+				struct sg_io_v4 *hdr, struct bsg_device *bd,
+				fmode_t has_write_perm)
+{
+	if (hdr->request_len > BLK_MAX_CDB) {
+		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+		if (!rq->cmd)
+			return -ENOMEM;
+	}
+
+	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
+			   hdr->request_len))
+		return -EFAULT;
+
+	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
+		if (blk_verify_command(rq->cmd, has_write_perm))
+			return -EPERM;
+	} else if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	/*
+	 * fill in request structure
+	 */
+	rq->cmd_len = hdr->request_len;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
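
The added file stops at line 199, partway through blk_fill_sgv4_hdr_rq(). For orientation only, the sketch below shows how user space typically drives the sg v4 interface this code implements: fill a struct sg_io_v4 from <linux/bsg.h> and submit it with the SG_IO ioctl on a bsg character node. This is a minimal illustration, not part of the commit; the device path /dev/bsg/0:0:0:0, the INQUIRY CDB, and the buffer sizes are assumptions chosen for the example.

/*
 * Illustrative sketch (not from this commit): issue a 6-byte INQUIRY
 * through the sg v4 header on a bsg node.  Field names follow
 * <linux/bsg.h>; error handling is kept minimal on purpose.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>
#include <scsi/sg.h>                          /* SG_IO */

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };   /* INQUIRY, 96-byte reply */
	unsigned char data[96], sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);          /* example path */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';                          /* marks the v4 header */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (unsigned long)cdb;         /* CDB copied by blk_fill_sgv4_hdr_rq() */
	hdr.request_len = sizeof(cdb);
	hdr.din_xferp = (unsigned long)data;      /* data-in buffer */
	hdr.din_xfer_len = sizeof(data);
	hdr.response = (unsigned long)sense;      /* sense buffer */
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 30000;                      /* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("device_status=%u response_len=%u\n",
		       hdr.device_status, hdr.response_len);

	close(fd);
	return 0;
}

The same sg_io_v4 header can also be submitted asynchronously with write() and completed with read() on the bsg node, which is what the queued_cmds/done_cmds bookkeeping and the busy_list/done_list in struct bsg_device above exist to support.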