@@ -540,3 +540,74 @@ static void blk_release_queue(struct kobject *kobj)
 	bdi_destroy(&q->backing_dev_info);
 
 	ida_simple_remove(&blk_queue_ida, q->id);
+	kmem_cache_free(blk_requestq_cachep, q);
+}
+
+static const struct sysfs_ops queue_sysfs_ops = {
+	.show	= queue_attr_show,
+	.store	= queue_attr_store,
+};
+
+struct kobj_type blk_queue_ktype = {
+	.sysfs_ops	= &queue_sysfs_ops,
+	.default_attrs	= default_attrs,
+	.release	= blk_release_queue,
+};
+
+int blk_register_queue(struct gendisk *disk)
+{
+	int ret;
+	struct device *dev = disk_to_dev(disk);
+	struct request_queue *q = disk->queue;
+
+	if (WARN_ON(!q))
+		return -ENXIO;
+
+	/*
+	 * Initialization must be complete by now. Finish the initial
+	 * bypass from queue allocation.
+	 */
+	blk_queue_bypass_end(q);
+
+	ret = blk_trace_init_sysfs(dev);
+	if (ret)
+		return ret;
+
+	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
+	if (ret < 0) {
+		blk_trace_remove_sysfs(dev);
+		return ret;
+	}
+
+	kobject_uevent(&q->kobj, KOBJ_ADD);
+
+	if (!q->request_fn)
+		return 0;
+
+	ret = elv_register_queue(q);
+	if (ret) {
+		kobject_uevent(&q->kobj, KOBJ_REMOVE);
+		kobject_del(&q->kobj);
+		blk_trace_remove_sysfs(dev);
+		kobject_put(&dev->kobj);
+		return ret;
+	}
+
+	return 0;
+}
+
+void blk_unregister_queue(struct gendisk *disk)
+{
+	struct request_queue *q = disk->queue;
+
+	if (WARN_ON(!q))
+		return;
+
+	if (q->request_fn)
+		elv_unregister_queue(q);
+
+	kobject_uevent(&q->kobj, KOBJ_REMOVE);
+	kobject_del(&q->kobj);
+	blk_trace_remove_sysfs(disk_to_dev(disk));
+	kobject_put(&disk_to_dev(disk)->kobj);
+}