@@ -2390,3 +2390,65 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
  */
 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 {
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
+
+	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
+	cfq_remove_request(rq);
+	cfqq->dispatched++;
+	(RQ_CFQG(rq))->dispatched++;
+	elv_dispatch_sort(q, rq);
+
+	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+	cfqq->nr_sectors += blk_rq_sectors(rq);
+	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
+}
+
+/*
+ * return expired entry, or NULL to just start from scratch in rbtree
+ */
+static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
+{
+	struct request *rq = NULL;
+
+	if (cfq_cfqq_fifo_expire(cfqq))
+		return NULL;
+
+	cfq_mark_cfqq_fifo_expire(cfqq);
+
+	if (list_empty(&cfqq->fifo))
+		return NULL;
+
+	rq = rq_entry_fifo(cfqq->fifo.next);
+	if (time_before(jiffies, rq_fifo_time(rq)))
+		rq = NULL;
+
+	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
+	return rq;
+}
+
+static inline int
+cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	const int base_rq = cfqd->cfq_slice_async_rq;
+
+	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
+}
+
+/*
+ * Must be called with the queue_lock held.
+ */
+static int cfqq_process_refs(struct cfq_queue *cfqq)
+{
+	int process_refs, io_refs;
+
+	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
+	process_refs = cfqq->ref - io_refs;
+	BUG_ON(process_refs < 0);
+	return process_refs;
+}
+