@@ -402,3 +402,141 @@ static struct queue_sysfs_entry queue_write_same_max_entry = {
 static struct queue_sysfs_entry queue_nonrot_entry = {
 	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_show_nonrot,
+	.store = queue_store_nonrot,
+};
+
+static struct queue_sysfs_entry queue_nomerges_entry = {
+	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_nomerges_show,
+	.store = queue_nomerges_store,
+};
+
+static struct queue_sysfs_entry queue_rq_affinity_entry = {
+	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_rq_affinity_show,
+	.store = queue_rq_affinity_store,
+};
+
+static struct queue_sysfs_entry queue_iostats_entry = {
+	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_show_iostats,
+	.store = queue_store_iostats,
+};
+
+static struct queue_sysfs_entry queue_random_entry = {
+	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_show_random,
+	.store = queue_store_random,
+};
+
+static struct attribute *default_attrs[] = {
+	&queue_requests_entry.attr,
+	&queue_ra_entry.attr,
+	&queue_max_hw_sectors_entry.attr,
+	&queue_max_sectors_entry.attr,
+	&queue_max_segments_entry.attr,
+	&queue_max_integrity_segments_entry.attr,
+	&queue_max_segment_size_entry.attr,
+	&queue_iosched_entry.attr,
+	&queue_hw_sector_size_entry.attr,
+	&queue_logical_block_size_entry.attr,
+	&queue_physical_block_size_entry.attr,
+	&queue_io_min_entry.attr,
+	&queue_io_opt_entry.attr,
+	&queue_discard_granularity_entry.attr,
+	&queue_discard_max_entry.attr,
+	&queue_discard_zeroes_data_entry.attr,
+	&queue_write_same_max_entry.attr,
+	&queue_nonrot_entry.attr,
+	&queue_nomerges_entry.attr,
+	&queue_rq_affinity_entry.attr,
+	&queue_iostats_entry.attr,
+	&queue_random_entry.attr,
+	NULL,
+};
+
+#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
+
+static ssize_t
+queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	struct queue_sysfs_entry *entry = to_queue(attr);
+	struct request_queue *q =
+		container_of(kobj, struct request_queue, kobj);
+	ssize_t res;
+
+	if (!entry->show)
+		return -EIO;
+	mutex_lock(&q->sysfs_lock);
+	if (blk_queue_dying(q)) {
+		mutex_unlock(&q->sysfs_lock);
+		return -ENOENT;
+	}
+	res = entry->show(q, page);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
+}
+
+static ssize_t
+queue_attr_store(struct kobject *kobj, struct attribute *attr,
+		 const char *page, size_t length)
+{
+	struct queue_sysfs_entry *entry = to_queue(attr);
+	struct request_queue *q;
+	ssize_t res;
+
+	if (!entry->store)
+		return -EIO;
+
+	q = container_of(kobj, struct request_queue, kobj);
+	mutex_lock(&q->sysfs_lock);
+	if (blk_queue_dying(q)) {
+		mutex_unlock(&q->sysfs_lock);
+		return -ENOENT;
+	}
+	res = entry->store(q, page, length);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
+}
+
+/**
+ * blk_release_queue: - release a &struct request_queue when it is no longer needed
+ * @kobj: the kobj belonging to the request queue to be released
+ *
+ * Description:
+ *     blk_release_queue is the pair to blk_init_queue() or
+ *     blk_queue_make_request().  It should be called when a request queue is
+ *     being released; typically when a block device is being de-registered.
+ *     Currently, its primary task is to free all the &struct request
+ *     structures that were allocated to the queue and the queue itself.
+ *
+ * Caveat:
+ *     Hopefully the low level driver will have finished any
+ *     outstanding requests first...
+ **/
+static void blk_release_queue(struct kobject *kobj)
+{
+	struct request_queue *q =
+		container_of(kobj, struct request_queue, kobj);
+
+	blk_sync_queue(q);
+
+	blkcg_exit_queue(q);
+
+	if (q->elevator) {
+		spin_lock_irq(q->queue_lock);
+		ioc_clear_queue(q);
+		spin_unlock_irq(q->queue_lock);
+		elevator_exit(q->elevator);
+	}
+
+	blk_exit_rl(&q->root_rl);
+
+	if (q->queue_tags)
+		__blk_queue_free_tags(q);
+
+	blk_trace_shutdown(q);
+
+	bdi_destroy(&q->backing_dev_info);
+
+	ida_simple_remove(&blk_queue_ida, q->id);