@@ -3133,3 +3133,135 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (is_sync) {
 		if (!cfq_class_idle(cfqq))
 			cfq_mark_cfqq_idle_window(cfqq);
+		cfq_mark_cfqq_sync(cfqq);
+	}
+	cfqq->pid = pid;
+}
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
+{
+	struct cfq_data *cfqd = cic_to_cfqd(cic);
+	struct cfq_queue *sync_cfqq;
+	uint64_t id;
+
+	rcu_read_lock();
+	id = bio_blkcg(bio)->id;
+	rcu_read_unlock();
+
+	/*
+	 * Check whether blkcg has changed.  The condition may trigger
+	 * spuriously on a newly created cic but there's no harm.
+	 */
+	if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
+		return;
+
+	sync_cfqq = cic_to_cfqq(cic, 1);
+	if (sync_cfqq) {
+		/*
+		 * Drop reference to sync queue.  A new sync queue will be
+		 * assigned in new group upon arrival of a fresh request.
+		 */
+		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
+		cic_set_cfqq(cic, NULL, 1);
+		cfq_put_queue(sync_cfqq);
+	}
+
+	cic->blkcg_id = id;
+}
+#else
+static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
+#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
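check_blkcg_changed() snapshots the current blkcg ID under rcu_read_lock(), compares it with the ID cached in the cic, and on a mismatch drops the cached sync queue so the next request is set up against the new group. Outside the kernel, the same detect-change-and-invalidate pattern looks like the minimal, compilable sketch below (all names are illustrative stand-ins, not kernel API; the RCU protection around the snapshot has no userspace counterpart and is elided):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for cic->blkcg_id and the cached sync queue. */
struct io_context_cache {
	unsigned long long group_id;	/* last group this context was charged to */
	void *sync_queue;		/* cached per-group resource, or NULL */
};

/* Stand-in for bio_blkcg(bio)->id: the group the current I/O belongs to. */
static unsigned long long current_group_id(void)
{
	return 42;	/* would come from the cgroup of the submitting task */
}

/*
 * Mirrors check_blkcg_changed(): if the group changed since sync_queue
 * was cached, drop the stale cache; a fresh one is allocated lazily by
 * the next request (cfq_find_alloc_queue() below plays that role).
 */
static void check_group_changed(struct io_context_cache *cache)
{
	unsigned long long id = current_group_id();

	if (cache->group_id == id)	/* common case: nothing moved */
		return;

	free(cache->sync_queue);	/* kernel: cfq_put_queue() */
	cache->sync_queue = NULL;	/* kernel: cic_set_cfqq(cic, NULL, 1) */
	cache->group_id = id;
}

int main(void)
{
	struct io_context_cache cache = { .group_id = 7,
					  .sync_queue = malloc(64) };

	check_group_changed(&cache);
	printf("group %llu, queue %p\n", cache.group_id, cache.sync_queue);
	return 0;
}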
+
+static struct cfq_queue *
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+		     struct bio *bio, gfp_t gfp_mask)
+{
+	struct blkcg *blkcg;
+	struct cfq_queue *cfqq, *new_cfqq = NULL;
+	struct cfq_group *cfqg;
+
+retry:
+	rcu_read_lock();
+
+	blkcg = bio_blkcg(bio);
+	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
+	cfqq = cic_to_cfqq(cic, is_sync);
+
+	/*
+	 * Always try a new alloc if we fell back to the OOM cfqq
+	 * originally, since it should just be a temporary situation.
+	 */
+	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+		cfqq = NULL;
+		if (new_cfqq) {
+			cfqq = new_cfqq;
+			new_cfqq = NULL;
+		} else if (gfp_mask & __GFP_WAIT) {
+			rcu_read_unlock();
+			spin_unlock_irq(cfqd->queue->queue_lock);
+			new_cfqq = kmem_cache_alloc_node(cfq_pool,
+							 gfp_mask | __GFP_ZERO,
+							 cfqd->queue->node);
+			spin_lock_irq(cfqd->queue->queue_lock);
+			if (new_cfqq)
+				goto retry;
+		} else {
+			cfqq = kmem_cache_alloc_node(cfq_pool,
+						     gfp_mask | __GFP_ZERO,
+						     cfqd->queue->node);
+		}
+
+		if (cfqq) {
+			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+			cfq_init_prio_data(cfqq, cic);
+			cfq_link_cfqq_cfqg(cfqq, cfqg);
+			cfq_log_cfqq(cfqd, cfqq, "alloced");
+		} else
+			cfqq = &cfqd->oom_cfqq;
+	}
+
+	if (new_cfqq)
+		kmem_cache_free(cfq_pool, new_cfqq);
+
+	rcu_read_unlock();
+	return cfqq;
+}
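Worth noting for review: cfq_find_alloc_queue() drops both the RCU read lock and the queue_lock around the __GFP_WAIT allocation, since kmem_cache_alloc_node() may sleep, then retakes the lock and jumps to retry:, redoing the lookup because another context may have installed a cfqq in the meantime; a spare that loses the race is freed at the end. A minimal userspace sketch of that unlock/allocate/relock/recheck shape, using a pthread mutex (all names are illustrative, not kernel API):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_obj;	/* stand-in for the cfqq found by cic_to_cfqq() */

/*
 * Return the shared object, allocating it on demand.  Mirrors the retry
 * loop above: the mutex is dropped across the blocking allocation, so
 * the lookup has to be redone once it is retaken.
 */
static void *get_obj(void)
{
	void *spare = NULL, *obj;

	pthread_mutex_lock(&lock);
retry:
	obj = shared_obj;			/* kernel: cic_to_cfqq() */
	if (!obj) {
		if (spare) {			/* allocated on an earlier pass */
			shared_obj = obj = spare;
			spare = NULL;
		} else {
			pthread_mutex_unlock(&lock);	/* allocation may block */
			spare = malloc(64);	/* kernel: kmem_cache_alloc_node() */
			pthread_mutex_lock(&lock);
			if (spare)
				goto retry;	/* re-check: another thread may have won */
		}
	}
	pthread_mutex_unlock(&lock);
	free(spare);		/* spare lost the race (or is NULL): discard */
	return obj;
}

int main(void)
{
	return get_obj() ? 0 : 1;
}

Unlike this sketch, the kernel variant never returns NULL: when the allocation fails outright it falls back to the embedded oom_cfqq, which is why the lookup above always retries a real allocation when it finds the OOM queue cached.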
+
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+	switch (ioprio_class) {
+	case IOPRIO_CLASS_RT:
+		return &cfqd->async_cfqq[0][ioprio];
+	case IOPRIO_CLASS_NONE:
+		ioprio = IOPRIO_NORM;
+		/* fall through */
+	case IOPRIO_CLASS_BE:
+		return &cfqd->async_cfqq[1][ioprio];
+	case IOPRIO_CLASS_IDLE:
+		return &cfqd->async_idle_cfqq;
+	default:
+		BUG();
+	}
+}
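cfq_async_queue_prio() returns a cfq_queue ** rather than the queue itself so the caller can later install a newly allocated queue into the per-device slot: async queues are shared by every task at the same (class, priority), unlike sync queues, which are per-process. A standalone sketch of the mapping the switch implements (the IOPRIO_* values below are stand-ins mirroring <linux/ioprio.h>; row 0 is RT, row 1 is BE, and IOPRIO_CLASS_NONE falls through to BE at the default priority):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in values; the real ones live in <linux/ioprio.h>. */
enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };
#define IOPRIO_BE_NR	8
#define IOPRIO_NORM	4

static void *async_cfqq[2][IOPRIO_BE_NR];	/* [0] = RT, [1] = BE */
static void *async_idle_cfqq;			/* all IDLE I/O shares one queue */

/* Same shape as cfq_async_queue_prio(): NONE falls through to BE@NORM. */
static void **async_queue_slot(int class, int prio)
{
	switch (class) {
	case IOPRIO_CLASS_RT:
		return &async_cfqq[0][prio];
	case IOPRIO_CLASS_NONE:
		prio = IOPRIO_NORM;
		/* fall through */
	case IOPRIO_CLASS_BE:
		return &async_cfqq[1][prio];
	case IOPRIO_CLASS_IDLE:
		return &async_idle_cfqq;
	default:
		abort();	/* kernel: BUG() on an invalid class */
	}
}

int main(void)
{
	/* NONE maps to the same slot as BE at the default priority. */
	printf("%d\n", async_queue_slot(IOPRIO_CLASS_NONE, 0) ==
		       async_queue_slot(IOPRIO_CLASS_BE, IOPRIO_NORM));
	return 0;
}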
+
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+	      struct bio *bio, gfp_t gfp_mask)
+{
+	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
+	struct cfq_queue **async_cfqq = NULL;
+	struct cfq_queue *cfqq = NULL;
+
+	if (!is_sync) {
+		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+		cfqq = *async_cfqq;
+	}
+
+	if (!cfqq)
+		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
+
+	/*
+	 * pin the queue now that it's allocated, scheduler exit will prune it