Browse Source

efHotAgingTrendMining main.c 朱俊杰 commit at 2021-03-24

朱俊杰 cách đây 4 năm
mục cha
commit
f2f5cd6cc6
1 tập tin đã thay đổi với 162 bổ sung và 0 xóa
  1. 162 0
      efHotAgingTrendMining/main.c

+ 162 - 0
efHotAgingTrendMining/main.c

@@ -881,3 +881,165 @@ static unsigned int bsg_poll(struct file *file, poll_table *wait)
 	spin_unlock_irq(&bd->lock);
 
 	return mask;
+}
+
+static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct bsg_device *bd = file->private_data;
+	int __user *uarg = (int __user *) arg;
+	int ret;
+
+	switch (cmd) {
+		/*
+		 * our own ioctls
+		 */
+	case SG_GET_COMMAND_Q:
+		return put_user(bd->max_queue, uarg);
+	case SG_SET_COMMAND_Q: {
+		int queue;
+
+		if (get_user(queue, uarg))
+			return -EFAULT;
+		if (queue < 1)
+			return -EINVAL;
+
+		spin_lock_irq(&bd->lock);
+		bd->max_queue = queue;
+		spin_unlock_irq(&bd->lock);
+		return 0;
+	}
+
+	/*
+	 * SCSI/sg ioctls
+	 */
+	case SG_GET_VERSION_NUM:
+	case SCSI_IOCTL_GET_IDLUN:
+	case SCSI_IOCTL_GET_BUS_NUMBER:
+	case SG_SET_TIMEOUT:
+	case SG_GET_TIMEOUT:
+	case SG_GET_RESERVED_SIZE:
+	case SG_SET_RESERVED_SIZE:
+	case SG_EMULATED_HOST:
+	case SCSI_IOCTL_SEND_COMMAND: {
+		void __user *uarg = (void __user *) arg;
+		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
+	}
+	case SG_IO: {
+		struct request *rq;
+		struct bio *bio, *bidi_bio = NULL;
+		struct sg_io_v4 hdr;
+		int at_head;
+		u8 sense[SCSI_SENSE_BUFFERSIZE];
+
+		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
+			return -EFAULT;
+
+		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
+		if (IS_ERR(rq))
+			return PTR_ERR(rq);
+
+		bio = rq->bio;
+		if (rq->next_rq)
+			bidi_bio = rq->next_rq->bio;
+
+		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
+		blk_execute_rq(bd->queue, NULL, rq, at_head);
+		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
+
+		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
+			return -EFAULT;
+
+		return ret;
+	}
+	/*
+	 * block device ioctls
+	 */
+	default:
+#if 0
+		return ioctl_by_bdev(bd->bdev, cmd, arg);
+#else
+		return -ENOTTY;
+#endif
+	}
+}
+
+static const struct file_operations bsg_fops = {
+	.read		=	bsg_read,
+	.write		=	bsg_write,
+	.poll		=	bsg_poll,
+	.open		=	bsg_open,
+	.release	=	bsg_release,
+	.unlocked_ioctl	=	bsg_ioctl,
+	.owner		=	THIS_MODULE,
+	.llseek		=	default_llseek,
+};
+
+void bsg_unregister_queue(struct request_queue *q)
+{
+	struct bsg_class_device *bcd = &q->bsg_dev;
+
+	if (!bcd->class_dev)
+		return;
+
+	mutex_lock(&bsg_mutex);
+	idr_remove(&bsg_minor_idr, bcd->minor);
+	if (q->kobj.sd)
+		sysfs_remove_link(&q->kobj, "bsg");
+	device_unregister(bcd->class_dev);
+	bcd->class_dev = NULL;
+	kref_put(&bcd->ref, bsg_kref_release_function);
+	mutex_unlock(&bsg_mutex);
+}
+EXPORT_SYMBOL_GPL(bsg_unregister_queue);
+
+int bsg_register_queue(struct request_queue *q, struct device *parent,
+		       const char *name, void (*release)(struct device *))
+{
+	struct bsg_class_device *bcd;
+	dev_t dev;
+	int ret, minor;
+	struct device *class_dev = NULL;
+	const char *devname;
+
+	if (name)
+		devname = name;
+	else
+		devname = dev_name(parent);
+
+	/*
+	 * we need a proper transport to send commands, not a stacked device
+	 */
+	if (!q->request_fn)
+		return 0;
+
+	bcd = &q->bsg_dev;
+	memset(bcd, 0, sizeof(*bcd));
+
+	mutex_lock(&bsg_mutex);
+
+	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
+	if (!ret) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
+	if (ret < 0)
+		goto unlock;
+
+	if (minor >= BSG_MAX_DEVS) {
+		printk(KERN_ERR "bsg: too many bsg devices\n");
+		ret = -EINVAL;
+		goto remove_idr;
+	}
+
+	bcd->minor = minor;
+	bcd->queue = q;
+	bcd->parent = get_device(parent);
+	bcd->release = release;
+	kref_init(&bcd->ref);
+	dev = MKDEV(bsg_major, bcd->minor);
+	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
+	if (IS_ERR(class_dev)) {
+		ret = PTR_ERR(class_dev);
+		goto put_dev;