@@ -4008,3 +4008,93 @@ static int cfq_init_queue(struct request_queue *q)
 	 * will not attempt to free it. oom_cfqq is linked to root_group
 	 * but shouldn't hold a reference as it'll never be unlinked. Lose
 	 * the reference from linking right away.
+	 */
+	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
+	cfqd->oom_cfqq.ref++;
+
+	spin_lock_irq(q->queue_lock);
+	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
+	cfqg_put(cfqd->root_group);
+	spin_unlock_irq(q->queue_lock);
+
+	init_timer(&cfqd->idle_slice_timer);
+	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
+	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
+
+	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
+
+	cfqd->cfq_quantum = cfq_quantum;
+	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
+	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
+	cfqd->cfq_back_max = cfq_back_max;
+	cfqd->cfq_back_penalty = cfq_back_penalty;
+	cfqd->cfq_slice[0] = cfq_slice_async;
+	cfqd->cfq_slice[1] = cfq_slice_sync;
+	cfqd->cfq_target_latency = cfq_target_latency;
+	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
+	cfqd->cfq_slice_idle = cfq_slice_idle;
+	cfqd->cfq_group_idle = cfq_group_idle;
+	cfqd->cfq_latency = 1;
+	cfqd->hw_tag = -1;
+	/*
+	 * we optimistically start assuming sync ops weren't delayed in last
+	 * second, in order to have larger depth for async operations.
+	 */
+	cfqd->last_delayed_sync = jiffies - HZ;
+	return 0;
+
+out_free:
+	kfree(cfqd);
+	return ret;
+}
+
+/*
+ * sysfs parts below -->
+ */
+static ssize_t
+cfq_var_show(unsigned int var, char *page)
+{
+	return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+cfq_var_store(unsigned int *var, const char *page, size_t count)
+{
+	char *p = (char *) page;
+
+	*var = simple_strtoul(p, &p, 10);
+	return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+	struct cfq_data *cfqd = e->elevator_data; \
+	unsigned int __data = __VAR; \
+	if (__CONV) \
+		__data = jiffies_to_msecs(__data); \
+	return cfq_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
+SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
+SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
+SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
+SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
+SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
+SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
+SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
+SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+	struct cfq_data *cfqd = e->elevator_data; \
+	unsigned int __data; \
+	int ret = cfq_var_store(&__data, (page), count); \
+	if (__data < (MIN)) \
+		__data = (MIN); \
+	else if (__data > (MAX)) \