@@ -3885,3 +3885,126 @@ static void cfq_idle_slice_timer(unsigned long data)
 		 * expired
 		 */
 		if (cfq_slice_used(cfqq))
+			goto expire;
+
+		/*
+		 * only expire and reinvoke request handler, if there are
+		 * other queues with pending requests
+		 */
+		if (!cfqd->busy_queues)
+			goto out_cont;
+
+		/*
+		 * not expired and it has a request pending, let it dispatch
+		 */
+		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+			goto out_kick;
+
+		/*
+		 * Queue depth flag is reset only when the idle didn't succeed
+		 */
+		cfq_clear_cfqq_deep(cfqq);
+	}
+expire:
+	cfq_slice_expired(cfqd, timed_out);
+out_kick:
+	cfq_schedule_dispatch(cfqd);
+out_cont:
+	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
+{
+	del_timer_sync(&cfqd->idle_slice_timer);
+	cancel_work_sync(&cfqd->unplug_work);
+}
+
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+	int i;
+
+	for (i = 0; i < IOPRIO_BE_NR; i++) {
+		if (cfqd->async_cfqq[0][i])
+			cfq_put_queue(cfqd->async_cfqq[0][i]);
+		if (cfqd->async_cfqq[1][i])
+			cfq_put_queue(cfqd->async_cfqq[1][i]);
+	}
+
+	if (cfqd->async_idle_cfqq)
+		cfq_put_queue(cfqd->async_idle_cfqq);
+}
+
+static void cfq_exit_queue(struct elevator_queue *e)
+{
+	struct cfq_data *cfqd = e->elevator_data;
+	struct request_queue *q = cfqd->queue;
+
+	cfq_shutdown_timer_wq(cfqd);
+
+	spin_lock_irq(q->queue_lock);
+
+	if (cfqd->active_queue)
+		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
+
+	cfq_put_async_queues(cfqd);
+
+	spin_unlock_irq(q->queue_lock);
+
+	cfq_shutdown_timer_wq(cfqd);
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
+	kfree(cfqd->root_group);
+#endif
+	kfree(cfqd);
+}
+
+static int cfq_init_queue(struct request_queue *q)
+{
+	struct cfq_data *cfqd;
+	struct blkcg_gq *blkg __maybe_unused;
+	int i, ret;
+
+	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
+	if (!cfqd)
+		return -ENOMEM;
+
+	cfqd->queue = q;
+	q->elevator->elevator_data = cfqd;
+
+	/* Init root service tree */
+	cfqd->grp_service_tree = CFQ_RB_ROOT;
+
+	/* Init root group and prefer root group over other groups by default */
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
+	if (ret)
+		goto out_free;
+
+	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
+#else
+	ret = -ENOMEM;
+	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
+					GFP_KERNEL, cfqd->queue->node);
+	if (!cfqd->root_group)
+		goto out_free;
+
+	cfq_init_cfqg_base(cfqd->root_group);
+#endif
+	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
+
+	/*
+	 * Not strictly needed (since RB_ROOT just clears the node and we
+	 * zeroed cfqd on alloc), but better be safe in case someone decides
+	 * to add magic to the rb code
+	 */
+	for (i = 0; i < CFQ_PRIO_LISTS; i++)
+		cfqd->prio_trees[i] = RB_ROOT;
+
+	/*
+	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+	 * Grab a permanent reference to it, so that the normal code flow
+	 * will not attempt to free it. oom_cfqq is linked to root_group
+	 * but shouldn't hold a reference as it'll never be unlinked. Lose
+	 * the reference from linking right away.