/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"
#include "blk-cgroup.h"

/* one entry per attribute exported under /sys/block/<dev>/queue/ */
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

/* format an unsigned long for a sysfs "show" method */
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

/* parse an unsigned long from a sysfs "store" buffer, rejecting values above UINT_MAX */
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = strict_strtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

/* resize the request allocation pool and recompute congestion thresholds */
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	/* congestion isn't cgroup aware and follows root blkcg for now */
	rl = &q->root_rl;

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);
 |