|
@@ -1877,3 +1877,200 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
|
|
|
struct task_struct *tsk = current;
|
|
|
struct cfq_io_cq *cic;
|
|
|
struct cfq_queue *cfqq;
|
|
|
+
|
|
|
+ cic = cfq_cic_lookup(cfqd, tsk->io_context);
|
|
|
+ if (!cic)
|
|
|
+ return NULL;
|
|
|
+
|
|
|
+ cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
|
|
|
+ if (cfqq) {
|
|
|
+ sector_t sector = bio->bi_sector + bio_sectors(bio);
|
|
|
+
|
|
|
+ return elv_rb_find(&cfqq->sort_list, sector);
|
|
|
+ }
|
|
|
+
|
|
|
+ return NULL;
|
|
|
+}
|
|
|
+
|
|
|
+static void cfq_activate_request(struct request_queue *q, struct request *rq)
|
|
|
+{
|
|
|
+ struct cfq_data *cfqd = q->elevator->elevator_data;
|
|
|
+
|
|
|
+ cfqd->rq_in_driver++;
|
|
|
+ cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
|
|
|
+ cfqd->rq_in_driver);
|
|
|
+
|
|
|
+ cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
|
|
|
+}
|
|
|
+
|
|
|
+static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
|
|
|
+{
|
|
|
+ struct cfq_data *cfqd = q->elevator->elevator_data;
|
|
|
+
|
|
|
+ WARN_ON(!cfqd->rq_in_driver);
|
|
|
+ cfqd->rq_in_driver--;
|
|
|
+ cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
|
|
|
+ cfqd->rq_in_driver);
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Take @rq out of its cfqq entirely: fifo list, sort tree and per-queue
 * accounting.  Used e.g. when a request has been merged into another
 * one (see cfq_merged_requests()).
 */
static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	/*
	 * If @rq was the cached "serve this next" request, pick a new
	 * candidate before the sort tree loses this node.
	 */
	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
	if (rq->cmd_flags & REQ_PRIO) {
		/* Keep the queue's count of pending REQ_PRIO requests in sync. */
		WARN_ON(!cfqq->prio_pending);
		cfqq->prio_pending--;
	}
}
|
|
|
+
|
|
|
+static int cfq_merge(struct request_queue *q, struct request **req,
|
|
|
+ struct bio *bio)
|
|
|
+{
|
|
|
+ struct cfq_data *cfqd = q->elevator->elevator_data;
|
|
|
+ struct request *__rq;
|
|
|
+
|
|
|
+ __rq = cfq_find_rq_fmerge(cfqd, bio);
|
|
|
+ if (__rq && elv_rq_merge_ok(__rq, bio)) {
|
|
|
+ *req = __rq;
|
|
|
+ return ELEVATOR_FRONT_MERGE;
|
|
|
+ }
|
|
|
+
|
|
|
+ return ELEVATOR_NO_MERGE;
|
|
|
+}
|
|
|
+
|
|
|
+static void cfq_merged_request(struct request_queue *q, struct request *req,
|
|
|
+ int type)
|
|
|
+{
|
|
|
+ if (type == ELEVATOR_FRONT_MERGE) {
|
|
|
+ struct cfq_queue *cfqq = RQ_CFQQ(req);
|
|
|
+
|
|
|
+ cfq_reposition_rq_rb(cfqq, req);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Elevator hook: a bio was merged into @req.  Only the group (blkcg)
 * merge statistics need updating here; request bookkeeping is done by
 * the other merge callbacks.
 */
static void cfq_bio_merged(struct request_queue *q, struct request *req,
			   struct bio *bio)
{
	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
}
|
|
|
+
|
|
|
/*
 * Elevator hook: @next has been merged into @rq.  Fix up fifo ordering
 * and the cached next_rq pointer, update merge statistics, then drop
 * @next from its queue — possibly taking that whole queue off the
 * service tree if this was its last request.
 */
static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = q->elevator->elevator_data;

	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
	    cfqq == RQ_CFQQ(next)) {
		/* rq inherits next's fifo slot and expiry time */
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	/* The absorbed request must not remain the cached dispatch hint. */
	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);

	/* From here on we deal with the queue @next belonged to. */
	cfqq = RQ_CFQQ(next);
	/*
	 * all requests of this queue are merged to other queues, delete it
	 * from the service tree. If it's the active_queue,
	 * cfq_dispatch_requests() will choose to expire it or do idle
	 */
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
	    cfqq != cfqd->active_queue)
		cfq_del_cfqq_rr(cfqd, cfqq);
}
|
|
|
+
|
|
|
+static int cfq_allow_merge(struct request_queue *q, struct request *rq,
|
|
|
+ struct bio *bio)
|
|
|
+{
|
|
|
+ struct cfq_data *cfqd = q->elevator->elevator_data;
|
|
|
+ struct cfq_io_cq *cic;
|
|
|
+ struct cfq_queue *cfqq;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Disallow merge of a sync bio into an async request.
|
|
|
+ */
|
|
|
+ if (cfq_bio_sync(bio) && !rq_is_sync(rq))
|
|
|
+ return false;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Lookup the cfqq that this bio will be queued with and allow
|
|
|
+ * merge only if rq is queued there.
|
|
|
+ */
|
|
|
+ cic = cfq_cic_lookup(cfqd, current->io_context);
|
|
|
+ if (!cic)
|
|
|
+ return false;
|
|
|
+
|
|
|
+ cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
|
|
|
+ return cfqq == RQ_CFQQ(rq);
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Stop the idle slice timer and fold the idling interval into @cfqq's
 * group statistics.
 */
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfqg_stats_update_idle_time(cfqq->cfqg);
}
|
|
|
+
|
|
|
/*
 * Make @cfqq the queue CFQ dispatches from, resetting all per-slice
 * state so it starts with a fresh slice.  @cfqq may be NULL, which
 * simply clears the active queue.
 */
static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
				cfqd->serving_prio, cfqd->serving_type);
		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
		/* Fresh slice: timing and throughput accounting restart. */
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		/* Drop per-slice flags left over from a previous activation. */
		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		/* Any pending idle wait belongs to the old queue — stop it. */
		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}
|
|
|
+
|
|
|
+/*
|
|
|
+ * current cfqq expired its slice (or was too idle), select new one
|
|
|
+ */
|
|
|
+static void
|
|
|
+__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
|
|
+ bool timed_out)
|
|
|
+{
|
|
|
+ cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
|
|
|
+
|
|
|
+ if (cfq_cfqq_wait_request(cfqq))
|
|
|
+ cfq_del_timer(cfqd, cfqq);
|
|
|
+
|
|
|
+ cfq_clear_cfqq_wait_request(cfqq);
|
|
|
+ cfq_clear_cfqq_wait_busy(cfqq);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If this cfqq is shared between multiple processes, check to
|
|
|
+ * make sure that those processes are still issuing I/Os within
|
|
|
+ * the mean seek distance. If not, it may be time to break the
|
|
|
+ * queues apart again.
|
|
|
+ */
|
|
|
+ if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
|