@@ -71,3 +71,116 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 		blk_set_queue_congested(q, BLK_RW_SYNC);
 	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
 		blk_clear_queue_congested(q, BLK_RW_SYNC);
+
+	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, BLK_RW_ASYNC);
+	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, BLK_RW_ASYNC);
+
+	blk_queue_for_each_rl(rl, q) {
+		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+			blk_set_rl_full(rl, BLK_RW_SYNC);
+		} else {
+			blk_clear_rl_full(rl, BLK_RW_SYNC);
+			wake_up(&rl->wait[BLK_RW_SYNC]);
+		}
+
+		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+			blk_set_rl_full(rl, BLK_RW_ASYNC);
+		} else {
+			blk_clear_rl_full(rl, BLK_RW_ASYNC);
+			wake_up(&rl->wait[BLK_RW_ASYNC]);
+		}
+	}
+
+	spin_unlock_irq(q->queue_lock);
+	return ret;
+}
+
+static ssize_t queue_ra_show(struct request_queue *q, char *page)
+{
+	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+					(PAGE_CACHE_SHIFT - 10);
+
+	return queue_var_show(ra_kb, (page));
+}
+
+static ssize_t
+queue_ra_store(struct request_queue *q, const char *page, size_t count)
+{
+	unsigned long ra_kb;
+	ssize_t ret = queue_var_store(&ra_kb, page, count);
+
+	if (ret < 0)
+		return ret;
+
+	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+
+	return ret;
+}
+
+static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
+{
+	int max_sectors_kb = queue_max_sectors(q) >> 1;
+
+	return queue_var_show(max_sectors_kb, (page));
+}
+
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_max_segments(q), (page));
+}
+
+static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->limits.max_integrity_segments, (page));
+}
+
+static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
+{
+	if (blk_queue_cluster(q))
+		return queue_var_show(queue_max_segment_size(q), (page));
+
+	return queue_var_show(PAGE_CACHE_SIZE, (page));
+}
+
+static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_logical_block_size(q), page);
+}
+
+static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_physical_block_size(q), page);
+}
+
+static ssize_t queue_io_min_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_io_min(q), page);
+}
+
+static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_io_opt(q), page);
+}
+
+static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->limits.discard_granularity, page);
+}
+
+static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
+{
+	return sprintf(page, "%llu\n",
+		       (unsigned long long)q->limits.max_discard_sectors << 9);
+}
+
+static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_discard_zeroes_data(q), page);
+}
+
+static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
+{
+	return sprintf(page, "%llu\n",
+		       (unsigned long long)q->limits.max_write_same_sectors << 9);