@@ -698,3 +698,98 @@ static struct bsg_device *bsg_alloc_device(void)
 	struct bsg_device *bd;
 
 	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
+	if (unlikely(!bd))
+		return NULL;
+
+	spin_lock_init(&bd->lock);
+
+	bd->max_queue = BSG_DEFAULT_CMDS;
+
+	INIT_LIST_HEAD(&bd->busy_list);
+	INIT_LIST_HEAD(&bd->done_list);
+	INIT_HLIST_NODE(&bd->dev_list);
+
+	init_waitqueue_head(&bd->wq_free);
+	init_waitqueue_head(&bd->wq_done);
+	return bd;
+}
+
+static void bsg_kref_release_function(struct kref *kref)
+{
+	struct bsg_class_device *bcd =
+		container_of(kref, struct bsg_class_device, ref);
+	struct device *parent = bcd->parent;
+
+	if (bcd->release)
+		bcd->release(bcd->parent);
+
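+	/* drop the bsg_class_device's reference on its parent device */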
+	put_device(parent);
+}
+
+static int bsg_put_device(struct bsg_device *bd)
+{
+	int ret = 0, do_free;
+	struct request_queue *q = bd->queue;
+
+	mutex_lock(&bsg_mutex);
+
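+	/* whoever drops the last reference is responsible for the teardown */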
+	do_free = atomic_dec_and_test(&bd->ref_count);
+	if (!do_free) {
+		mutex_unlock(&bsg_mutex);
+		goto out;
+	}
+
+	hlist_del(&bd->dev_list);
+	mutex_unlock(&bsg_mutex);
+
+	dprintk("%s: tearing down\n", bd->name);
+
+	/*
+	 * close can always block
+	 */
+	set_bit(BSG_F_BLOCK, &bd->flags);
+
+	/*
+	 * Error detection is best-effort here: it is the application's
+	 * responsibility to reap its commands before close() if it wants
+	 * fool-proof error detection.
+	 */
+	ret = bsg_complete_all_commands(bd);
+
+	kfree(bd);
+out:
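+	/* always drop the class device reference; drop the queue only if bd was freed */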
+	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
+	if (do_free)
+		blk_put_queue(q);
+	return ret;
+}
+
+static struct bsg_device *bsg_add_device(struct inode *inode,
+					 struct request_queue *rq,
+					 struct file *file)
+{
+	struct bsg_device *bd;
+#ifdef BSG_DEBUG
+	unsigned char buf[32];
+#endif
+	if (!blk_get_queue(rq))
+		return ERR_PTR(-ENXIO);
+
+	bd = bsg_alloc_device();
+	if (!bd) {
+		blk_put_queue(rq);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	bd->queue = rq;
+
+	bsg_set_block(bd, file);
+
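+	/* the opener holds the initial reference; bsg_put_device() drops it */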
+	atomic_set(&bd->ref_count, 1);
+	mutex_lock(&bsg_mutex);
+	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
+
+	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
+	dprintk("bound to <%s>, max queue %d\n",
+		format_dev_t(buf, inode->i_rdev), bd->max_queue);
+