|
@@ -387,3 +387,190 @@ static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
|
|
|
if (bd->done_cmds) {
|
|
|
bc = list_first_entry(&bd->done_list, struct bsg_command, list);
|
|
|
list_del(&bc->list);
|
|
|
+ bd->done_cmds--;
|
|
|
+ }
|
|
|
+ spin_unlock_irq(&bd->lock);
|
|
|
+
|
|
|
+ return bc;
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Get a finished command from the done list
|
|
|
+ */
|
|
|
+static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
|
|
|
+{
|
|
|
+ struct bsg_command *bc;
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ do {
|
|
|
+ /* grab the next completed command off the done list, if any */
+ bc = bsg_next_done_cmd(bd);
|
|
|
+ if (bc)
|
|
|
+ break;
|
|
|
+
|
|
|
+ /* nothing completed yet: non-blocking opens bail out with -EAGAIN */
+ if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
|
|
|
+ bc = ERR_PTR(-EAGAIN);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
+ * blocking mode: sleep until ->done_cmds becomes non-zero; a
+ * signal aborts the wait and is reported as -ERESTARTSYS
+ */
+ ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
|
|
|
+ if (ret) {
|
|
|
+ bc = ERR_PTR(-ERESTARTSYS);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ } while (1);
|
|
|
+
|
|
|
+ dprintk("%s: returning done %p\n", bd->name, bc);
|
|
|
+
|
|
|
+ /* returns a command, or ERR_PTR(-EAGAIN / -ERESTARTSYS) */
+ return bc;
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Fill the sg_io_v4 reply header from a finished request, copy any sense
+ * data to userspace, then tear down the request and its mappings.
+ * Returns 0, -EFAULT on a failed sense copy, or a negative error carried
+ * in rq->errors.
+ */
+static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
|
|
|
+ struct bio *bio, struct bio *bidi_bio)
|
|
|
+{
|
|
|
+ int ret = 0;
|
|
|
+
|
|
|
+ dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
|
|
|
+ /*
|
|
|
+ * fill in all the output members
|
|
|
+ */
|
|
|
+ hdr->device_status = rq->errors & 0xff;
|
|
|
+ hdr->transport_status = host_byte(rq->errors);
|
|
|
+ hdr->driver_status = driver_byte(rq->errors);
|
|
|
+ hdr->info = 0;
|
|
|
+ if (hdr->device_status || hdr->transport_status || hdr->driver_status)
|
|
|
+ hdr->info |= SG_INFO_CHECK;
|
|
|
+ hdr->response_len = 0;
|
|
|
+
|
|
|
+ /* copy sense data out, bounded by the caller-supplied buffer size */
+ if (rq->sense_len && hdr->response) {
|
|
|
+ int len = min_t(unsigned int, hdr->max_response_len,
|
|
|
+ rq->sense_len);
|
|
|
+
|
|
|
+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
|
|
|
+ rq->sense, len);
|
|
|
+ if (!ret)
|
|
|
+ hdr->response_len = len;
|
|
|
+ else
|
|
|
+ ret = -EFAULT;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* bidirectional request: report both residuals, free the bidi half */
+ if (rq->next_rq) {
|
|
|
+ hdr->dout_resid = rq->resid_len;
|
|
|
+ hdr->din_resid = rq->next_rq->resid_len;
|
|
|
+ blk_rq_unmap_user(bidi_bio);
|
|
|
+ blk_put_request(rq->next_rq);
|
|
|
+ } else if (rq_data_dir(rq) == READ)
|
|
|
+ hdr->din_resid = rq->resid_len;
|
|
|
+ else
|
|
|
+ hdr->dout_resid = rq->resid_len;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If the request generated a negative error number, return it
|
|
|
+ * (providing we aren't already returning an error); if it's
|
|
|
+ * just a protocol response (i.e. non negative), that gets
|
|
|
+ * processed above.
|
|
|
+ */
|
|
|
+ if (!ret && rq->errors < 0)
|
|
|
+ ret = rq->errors;
|
|
|
+
|
|
|
+ /* unmap user pages, free a separately-allocated cmd, drop the request */
+ blk_rq_unmap_user(bio);
|
|
|
+ if (rq->cmd != rq->__cmd)
|
|
|
+ kfree(rq->cmd);
|
|
|
+ blk_put_request(rq);
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Drain the device on release: wait until no commands remain in flight,
+ * then complete and free everything left on the done list. Returns the
+ * first completion error seen, or 0.
+ */
+static int bsg_complete_all_commands(struct bsg_device *bd)
|
|
|
+{
|
|
|
+ struct bsg_command *bc;
|
|
|
+ int ret, tret;
|
|
|
+
|
|
|
+ dprintk("%s: entered\n", bd->name);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * wait for all commands to complete
|
|
|
+ */
|
|
|
+ ret = 0;
|
|
|
+ do {
|
|
|
+ ret = bsg_io_schedule(bd);
|
|
|
+ /*
|
|
|
+ * look for -ENODATA specifically -- we'll sometimes get
|
|
|
+ * -ERESTARTSYS when we've taken a signal, but we can't
|
|
|
+ * return until we're done freeing the queue, so ignore
|
|
|
+ * it. The signal will get handled when we're done freeing
|
|
|
+ * the bsg_device.
|
|
|
+ */
|
|
|
+ } while (ret != -ENODATA);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * discard done commands
|
|
|
+ */
|
|
|
+ ret = 0;
|
|
|
+ do {
|
|
|
+ /* queued_cmds is lock-protected; peek under the lock and stop
+ * once nothing is outstanding */
+ spin_lock_irq(&bd->lock);
|
|
|
+ if (!bd->queued_cmds) {
|
|
|
+ spin_unlock_irq(&bd->lock);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ spin_unlock_irq(&bd->lock);
|
|
|
+
|
|
|
+ bc = bsg_get_done_cmd(bd);
|
|
|
+ if (IS_ERR(bc))
|
|
|
+ break;
|
|
|
+
|
|
|
+ /* complete the command but keep only the first error seen */
+ tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
|
|
|
+ bc->bidi_bio);
|
|
|
+ if (!ret)
|
|
|
+ ret = tret;
|
|
|
+
|
|
|
+ bsg_free_command(bc);
|
|
|
+ } while (1);
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Core of the bsg read(2) path: hand back one sg_io_v4 reply header per
+ * completed command, up to count bytes. *bytes_read is advanced by one
+ * header size per command actually returned; 'iov' is currently unused
+ * here. Returns 0 or a negative error.
+ */
+static int
|
|
|
+__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
|
|
|
+ const struct iovec *iov, ssize_t *bytes_read)
|
|
|
+{
|
|
|
+ struct bsg_command *bc;
|
|
|
+ int nr_commands, ret;
|
|
|
+
|
|
|
+ /* reads must be a whole number of reply headers */
+ if (count % sizeof(struct sg_io_v4))
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ ret = 0;
|
|
|
+ nr_commands = count / sizeof(struct sg_io_v4);
|
|
|
+ while (nr_commands) {
|
|
|
+ /* may sleep or return ERR_PTR(-EAGAIN) for non-blocking fds */
+ bc = bsg_get_done_cmd(bd);
|
|
|
+ if (IS_ERR(bc)) {
|
|
|
+ ret = PTR_ERR(bc);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * this is the only case where we need to copy data back
|
|
|
+ * after completing the request. so do that here,
|
|
|
+ * bsg_complete_work() cannot do that for us
|
|
|
+ */
|
|
|
+ ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
|
|
|
+ bc->bidi_bio);
|
|
|
+
|
|
|
+ /* hand the filled-in reply header back to userspace */
+ if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
|
|
|
+ ret = -EFAULT;
|
|
|
+
|
|
|
+ /* command is freed whether or not the copy succeeded */
+ bsg_free_command(bc);
|
|
|
+
|
|
|
+ if (ret)
|
|
|
+ break;
|
|
|
+
|
|
|
+ /* account for the header just returned and move to the next */
+ buf += sizeof(struct sg_io_v4);
|
|
|
+ *bytes_read += sizeof(struct sg_io_v4);
|
|
|
+ nr_commands--;
|
|
|
+ }
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
|
|
|
+{
|