@@ -2946,3 +2946,190 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
* queue always expire after 1 dispatch round.
*/
if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+ cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+ cfq_class_idle(cfqq))) {
+ cfqq->slice_end = jiffies + 1;
+ cfq_slice_expired(cfqd, 0);
+ }
+
+ cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
+ return 1;
+}
+
+/*
+ * task holds one reference to the queue, dropped when task exits. each rq
+ * in-flight on this queue also holds a reference, dropped when rq is freed.
+ *
+ * Each cfq queue took a reference on the parent group. Drop it now.
+ * queue lock must be held here.
+ */
+static void cfq_put_queue(struct cfq_queue *cfqq)
+{
+ struct cfq_data *cfqd = cfqq->cfqd;
+ struct cfq_group *cfqg;
+
+ BUG_ON(cfqq->ref <= 0);
+
+ cfqq->ref--;
+ if (cfqq->ref)
+ return;
+
+ cfq_log_cfqq(cfqd, cfqq, "put_queue");
+ BUG_ON(rb_first(&cfqq->sort_list));
+ BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
+ cfqg = cfqq->cfqg;
+
+ if (unlikely(cfqd->active_queue == cfqq)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
+
+ BUG_ON(cfq_cfqq_on_rr(cfqq));
+ kmem_cache_free(cfq_pool, cfqq);
+ cfqg_put(cfqg);
+}
+
+static void cfq_put_cooperator(struct cfq_queue *cfqq)
+{
+ struct cfq_queue *__cfqq, *next;
+
+ /*
+ * If this queue was scheduled to merge with another queue, be
+ * sure to drop the reference taken on that queue (and others in
+ * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
+ */
+ __cfqq = cfqq->new_cfqq;
+ while (__cfqq) {
+ if (__cfqq == cfqq) {
+ WARN(1, "cfqq->new_cfqq loop detected\n");
+ break;
+ }
+ next = __cfqq->new_cfqq;
+ cfq_put_queue(__cfqq);
+ __cfqq = next;
+ }
+}
+
+static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ if (unlikely(cfqq == cfqd->active_queue)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
+
+ cfq_put_cooperator(cfqq);
+
+ cfq_put_queue(cfqq);
+}
+
+static void cfq_init_icq(struct io_cq *icq)
+{
+ struct cfq_io_cq *cic = icq_to_cic(icq);
+
+ cic->ttime.last_end_request = jiffies;
+}
+
+static void cfq_exit_icq(struct io_cq *icq)
+{
+ struct cfq_io_cq *cic = icq_to_cic(icq);
+ struct cfq_data *cfqd = cic_to_cfqd(cic);
+
+ if (cic->cfqq[BLK_RW_ASYNC]) {
+ cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
+ cic->cfqq[BLK_RW_ASYNC] = NULL;
+ }
+
+ if (cic->cfqq[BLK_RW_SYNC]) {
+ cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
+ cic->cfqq[BLK_RW_SYNC] = NULL;
+ }
+}
+
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
+{
+ struct task_struct *tsk = current;
+ int ioprio_class;
+
+ if (!cfq_cfqq_prio_changed(cfqq))
+ return;
+
+ ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+ switch (ioprio_class) {
+ default:
+ printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+ case IOPRIO_CLASS_NONE:
+ /*
+ * no prio set, inherit CPU scheduling settings
+ */
+ cfqq->ioprio = task_nice_ioprio(tsk);
+ cfqq->ioprio_class = task_nice_ioclass(tsk);
+ break;
+ case IOPRIO_CLASS_RT:
+ cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
+ cfqq->ioprio_class = IOPRIO_CLASS_RT;
+ break;
+ case IOPRIO_CLASS_BE:
+ cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
+ cfqq->ioprio_class = IOPRIO_CLASS_BE;
+ break;
+ case IOPRIO_CLASS_IDLE:
+ cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
+ cfqq->ioprio = 7;
+ cfq_clear_cfqq_idle_window(cfqq);
+ break;
+ }
+
+ /*
+ * keep track of original prio settings in case we have to temporarily
+ * elevate the priority of this queue
+ */
+ cfqq->org_ioprio = cfqq->ioprio;
+ cfq_clear_cfqq_prio_changed(cfqq);
+}
+
+static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
+{
+ int ioprio = cic->icq.ioc->ioprio;
+ struct cfq_data *cfqd = cic_to_cfqd(cic);
+ struct cfq_queue *cfqq;
+
+ /*
+ * Check whether ioprio has changed. The condition may trigger
+ * spuriously on a newly created cic but there's no harm.
+ */
+ if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
+ return;
+
+ cfqq = cic->cfqq[BLK_RW_ASYNC];
+ if (cfqq) {
+ struct cfq_queue *new_cfqq;
+ new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
+ GFP_ATOMIC);
+ if (new_cfqq) {
+ cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
+ cfq_put_queue(cfqq);
+ }
+ }
+
+ cfqq = cic->cfqq[BLK_RW_SYNC];
+ if (cfqq)
+ cfq_mark_cfqq_prio_changed(cfqq);
+
+ cic->ioprio = ioprio;
+}
+
+static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ pid_t pid, bool is_sync)
+{
+ RB_CLEAR_NODE(&cfqq->rb_node);
+ RB_CLEAR_NODE(&cfqq->p_node);
+ INIT_LIST_HEAD(&cfqq->fifo);
+
+ cfqq->ref = 0;
+ cfqq->cfqd = cfqd;
+
+ cfq_mark_cfqq_prio_changed(cfqq);
+
+ if (is_sync) {
+ if (!cfq_class_idle(cfqq))
+ cfq_mark_cfqq_idle_window(cfqq);