efHotAgingTrendMining main.c 朱俊杰 commit at 2020-11-09

朱俊杰 4 years ago
parent commit c35fb06eb7
1 changed file with 190 additions and 0 deletions

+ 190 - 0
efHotAgingTrendMining/main.c

@@ -197,3 +197,193 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 	 */
 	rq->cmd_len = hdr->request_len;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
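+	/*
+	 * pick the timeout: the user-supplied value (in msecs) wins, then
+	 * the queue's sg_timeout, then the global default; never drop
+	 * below the minimum
+	 */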
+	rq->timeout = msecs_to_jiffies(hdr->timeout);
+	if (!rq->timeout)
+		rq->timeout = q->sg_timeout;
+	if (!rq->timeout)
+		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+		rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+	return 0;
+}
+
+/*
+ * Check if sg_io_v4 from user is allowed and valid
+ */
+static int
+bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
+{
+	int ret = 0;
+
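+	/* the sg_io_v4 header must carry the 'Q' guard byte */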
+	if (hdr->guard != 'Q')
+		return -EINVAL;
+
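+	/* only the SCSI protocol, with cmd/transport subprotocols, is accepted */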
+	switch (hdr->protocol) {
+	case BSG_PROTOCOL_SCSI:
+		switch (hdr->subprotocol) {
+		case BSG_SUB_PROTOCOL_SCSI_CMD:
+		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
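+	/* any data-out payload makes this a WRITE request, otherwise READ */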
+	*rw = hdr->dout_xfer_len ? WRITE : READ;
+	return ret;
+}
+
+/*
+ * map sg_io_v4 to a request.
+ */
+static struct request *
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
+	    u8 *sense)
+{
+	struct request_queue *q = bd->queue;
+	struct request *rq, *next_rq = NULL;
+	int ret, rw;
+	unsigned int dxfer_len;
+	void __user *dxferp = NULL;
+	struct bsg_class_device *bcd = &q->bsg_dev;
+
+	/* if the LLD has been removed then bsg_unregister_queue will
+	 * eventually be called and the class_dev freed, so we can no
+	 * longer use this request_queue. Return "no such address".
+	 */
+	if (!bcd->class_dev)
+		return ERR_PTR(-ENXIO);
+
+	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
+		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
+		hdr->din_xfer_len);
+
+	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
+	if (ret)
+		return ERR_PTR(ret);
+
+	/*
+	 * map scatter-gather elements separately and string them to the
+	 * request
+	 */
+	rq = blk_get_request(q, rw, GFP_KERNEL);
+	if (!rq)
+		return ERR_PTR(-ENOMEM);
+	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
+	if (ret)
+		goto out;
+
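+	/*
+	 * a WRITE that also expects data-in is a bidirectional command:
+	 * the queue must advertise QUEUE_FLAG_BIDI, and the data-in side
+	 * is mapped into a second request linked via rq->next_rq
+	 */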
+	if (rw == WRITE && hdr->din_xfer_len) {
+		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
+
+		next_rq = blk_get_request(q, READ, GFP_KERNEL);
+		if (!next_rq) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		rq->next_rq = next_rq;
+		next_rq->cmd_type = rq->cmd_type;
+
+		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
+		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
+				      hdr->din_xfer_len, GFP_KERNEL);
+		if (ret)
+			goto out;
+	}
+
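+	/* pick the user buffer and length for the primary data transfer */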
+	if (hdr->dout_xfer_len) {
+		dxfer_len = hdr->dout_xfer_len;
+		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
+	} else if (hdr->din_xfer_len) {
+		dxfer_len = hdr->din_xfer_len;
+		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
+	} else
+		dxfer_len = 0;
+
+	if (dxfer_len) {
+		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
+				      GFP_KERNEL);
+		if (ret)
+			goto out;
+	}
+
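+	/* caller-provided sense buffer; sense_len is filled in on completion */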
+	rq->sense = sense;
+	rq->sense_len = 0;
+
+	return rq;
+out:
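+	/* unwind: free a separately allocated cmd buffer, then both requests */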
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
+	blk_put_request(rq);
+	if (next_rq) {
+		blk_rq_unmap_user(next_rq->bio);
+		blk_put_request(next_rq);
+	}
+	return ERR_PTR(ret);
+}
+
+/*
+ * async completion callback from the block layer, invoked when
+ * scsi/ide/whatever calls end_that_request_last() on a request
+ */
+static void bsg_rq_end_io(struct request *rq, int uptodate)
+{
+	struct bsg_command *bc = rq->end_io_data;
+	struct bsg_device *bd = bc->bd;
+	unsigned long flags;
+
+	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
+		bd->name, rq, bc, bc->bio, uptodate);
+
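+	/* hdr.duration held the submit time; convert it to elapsed msecs */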
+	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
+
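+	/* move the command from the busy list to the done list, then wake
+	 * anyone waiting for completions
+	 */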
+	spin_lock_irqsave(&bd->lock, flags);
+	list_move_tail(&bc->list, &bd->done_list);
+	bd->done_cmds++;
+	spin_unlock_irqrestore(&bd->lock, flags);
+
+	wake_up(&bd->wq_done);
+}
+
+/*
+ * do final setup of a 'bc' and submit the matching 'rq' to the block
+ * layer for io
+ */
+static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
+			    struct bsg_command *bc, struct request *rq)
+{
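+	/* head insertion is the default; BSG_FLAG_Q_AT_TAIL asks for tail */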
+	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));
+
+	/*
+	 * add bc command to busy queue and submit rq for io
+	 */
+	bc->rq = rq;
+	bc->bio = rq->bio;
+	if (rq->next_rq)
+		bc->bidi_bio = rq->next_rq->bio;
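+	/* stash the submit time; bsg_rq_end_io() turns it into a duration */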
+	bc->hdr.duration = jiffies;
+	spin_lock_irq(&bd->lock);
+	list_add_tail(&bc->list, &bd->busy_list);
+	spin_unlock_irq(&bd->lock);
+
+	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
+
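+	/* hand the request to the block layer; bsg_rq_end_io() runs on
+	 * completion
+	 */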
+	rq->end_io_data = bc;
+	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
+}
+
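+/*
+ * pop the oldest completed command off the done list, if any
+ */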
+static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
+{
+	struct bsg_command *bc = NULL;
+
+	spin_lock_irq(&bd->lock);
+	if (bd->done_cmds) {
+		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
+		list_del(&bc->list);