@@ -1117,3 +1117,125 @@ cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
 
 	if (rbprev)
+		prev = rb_entry_rq(rbprev);
+
+	if (rbnext)
+		next = rb_entry_rq(rbnext);
+	else {
+		rbnext = rb_first(&cfqq->sort_list);
+		if (rbnext && rbnext != &last->rb_node)
+			next = rb_entry_rq(rbnext);
+	}
+
+	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
+}
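
For context: cfq_choose_req() is defined earlier in cfq-iosched.c and picks whichever of the two candidates is the cheaper seek from blk_rq_pos(last), preferring forward seeks and penalizing backward ones. The toy userspace sketch below illustrates only that idea; seek_cost() and the back_penalty of 2 are invented for illustration, not the kernel's actual heuristic.

#include <stdio.h>

/*
 * Toy model of the "prefer forward, penalize backward" idea behind
 * cfq_choose_req(). The cost function is an illustrative assumption.
 */
static long long seek_cost(long long head, long long sector)
{
	const long long back_penalty = 2;	/* hypothetical penalty factor */

	if (sector >= head)
		return sector - head;		/* forward seek: raw distance */
	return (head - sector) * back_penalty;	/* backward seek: penalized */
}

int main(void)
{
	long long head = 1000, next = 1300, prev = 900;

	/* prev wins here: 100 sectors back (cost 200) beats 300 forward. */
	printf("pick %s\n",
	       seek_cost(head, next) <= seek_cost(head, prev) ? "next" : "prev");
	return 0;
}
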
+
+static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
+				      struct cfq_queue *cfqq)
+{
+	/*
+	 * just an approximation, should be ok.
+	 */
+	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
+		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
+}
+
+static inline s64
+cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+	return cfqg->vdisktime - st->min_vdisktime;
+}
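
Why cfqg_key() returns a signed delta rather than the raw vdisktime: like CFS's vruntime, vdisktime is an unsigned counter that can eventually wrap, and comparing s64 differences from min_vdisktime keeps the tree ordering correct across the wrap. A standalone sketch of the effect (the values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t min_vdisktime = UINT64_MAX - 5;	/* counter about to wrap */
	uint64_t a = UINT64_MAX - 2;			/* logically earlier */
	uint64_t b = min_vdisktime + 10;		/* logically later, wraps to 4 */

	/* Raw comparison gets the order wrong once b wraps past zero. */
	printf("raw:   a %s b\n", a < b ? "<" : ">=");

	/* Signed delta from min_vdisktime, as in cfqg_key(), stays correct. */
	int64_t ka = (int64_t)(a - min_vdisktime);
	int64_t kb = (int64_t)(b - min_vdisktime);
	printf("keyed: a %s b\n", ka < kb ? "<" : ">=");
	return 0;
}
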
+
+static void
+__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+	struct rb_node **node = &st->rb.rb_node;
+	struct rb_node *parent = NULL;
+	struct cfq_group *__cfqg;
+	s64 key = cfqg_key(st, cfqg);
+	int left = 1;
+
+	while (*node != NULL) {
+		parent = *node;
+		__cfqg = rb_entry_cfqg(parent);
+
+		if (key < cfqg_key(st, __cfqg))
+			node = &parent->rb_left;
+		else {
+			node = &parent->rb_right;
+			left = 0;
+		}
+	}
+
+	if (left)
+		st->left = &cfqg->rb_node;
+
+	rb_link_node(&cfqg->rb_node, parent, node);
+	rb_insert_color(&cfqg->rb_node, &st->rb);
+}
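
__cfq_group_service_tree_add() is a standard rbtree insert with one twist: the left flag records whether the descent took only left turns, so st->left can serve as an O(1) cache of the smallest-key group. The userspace sketch below mimics that bookkeeping with a plain unbalanced BST (the kernel uses its rbtree API; the names here are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct grp {
	long long key;			/* stands in for cfqg_key() */
	struct grp *left, *right;
};

/* Tree with a cached pointer to the leftmost (smallest-key) node. */
struct tree {
	struct grp *root;
	struct grp *leftmost;
};

static void tree_add(struct tree *t, struct grp *g)
{
	struct grp **node = &t->root;
	int left = 1;

	while (*node) {
		if (g->key < (*node)->key)
			node = &(*node)->left;
		else {
			node = &(*node)->right;
			left = 0;	/* took a right turn: not leftmost */
		}
	}
	*node = g;
	if (left)			/* only left turns: new minimum */
		t->leftmost = g;
}

int main(void)
{
	struct tree t = { 0 };
	long long keys[] = { 30, 10, 40, 5, 20 };

	for (int i = 0; i < 5; i++) {
		struct grp *g = calloc(1, sizeof(*g));
		g->key = keys[i];
		tree_add(&t, g);
	}
	printf("next group to serve has key %lld\n", t.leftmost->key);
	return 0;
}
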
+
+static void
+cfq_update_group_weight(struct cfq_group *cfqg)
+{
+	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+	if (cfqg->new_weight) {
+		cfqg->weight = cfqg->new_weight;
+		cfqg->new_weight = 0;
+	}
+}
+
+static void
+cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+
+	cfq_update_group_weight(cfqg);
+	__cfq_group_service_tree_add(st, cfqg);
+	st->total_weight += cfqg->weight;
+}
+
+static void
+cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+	struct cfq_rb_root *st = &cfqd->grp_service_tree;
+	struct cfq_group *__cfqg;
+	struct rb_node *n;
+
+	cfqg->nr_cfqq++;
+	if (!RB_EMPTY_NODE(&cfqg->rb_node))
+		return;
+
+	/*
+	 * Currently put the group at the end. Later implement something
+	 * so that groups get a lower vtime based on their weights, so that
+	 * a group does not lose everything if it was not continuously backlogged.
+	 */
+	n = rb_last(&st->rb);
+	if (n) {
+		__cfqg = rb_entry_cfqg(n);
+		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+	} else
+		cfqg->vdisktime = st->min_vdisktime;
+	cfq_group_service_tree_add(st, cfqg);
+}
+
+static void
+cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+	st->total_weight -= cfqg->weight;
+	if (!RB_EMPTY_NODE(&cfqg->rb_node))
+		cfq_rb_erase(&cfqg->rb_node, st);
+}
+
+static void
+cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+	struct cfq_rb_root *st = &cfqd->grp_service_tree;
+
+	BUG_ON(cfqg->nr_cfqq < 1);
+	cfqg->nr_cfqq--;
+
+	/* If there are other cfq queues under this group, don't delete it */
+	if (cfqg->nr_cfqq)
+		return;
+
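
The total_weight that cfq_group_service_tree_add()/_del() maintain is what turns per-group weights into proportional disk-time shares: while backlogged, a group is entitled to roughly weight / total_weight of the disk. The sketch below shows only that arithmetic, with made-up weights and a hypothetical target_latency; the real slice scaling lives elsewhere in CFQ and is not part of this hunk.

#include <stdio.h>

/*
 * Proportional-share arithmetic implied by the total_weight bookkeeping.
 * target_latency and the weights are made-up example values.
 */
int main(void)
{
	unsigned int weights[] = { 100, 200, 500 };	/* backlogged groups */
	unsigned int total_weight = 0;
	unsigned int target_latency = 300;		/* ms to cycle all groups */

	for (int i = 0; i < 3; i++)
		total_weight += weights[i];	/* as in cfq_group_service_tree_add() */

	for (int i = 0; i < 3; i++)
		printf("group %d: slice = %u ms\n", i,
		       target_latency * weights[i] / total_weight);
	return 0;
}
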