@@ -327,3 +327,82 @@ struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
 	 * holding both q and ioc locks, and we're holding q lock - if we
 	 * find an icq which points to us, it's guaranteed to be valid.
 	 */
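+	/* fast path: try the hint, the most recently looked-up icq for @ioc */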
+	rcu_read_lock();
+	icq = rcu_dereference(ioc->icq_hint);
+	if (icq && icq->q == q)
+		goto out;
+
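+	/*
+	 * Hint miss: fall back to the radix tree keyed by queue id.  The
+	 * icq->q == q check mirrors the one above; updating the hint may
+	 * race with other lookups, which is harmless since every reader
+	 * revalidates it the same way.
+	 */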
+	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
+	if (icq && icq->q == q)
+		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
+	else
+		icq = NULL;
+out:
+	rcu_read_unlock();
+	return icq;
+}
+EXPORT_SYMBOL(ioc_lookup_icq);
+
+/**
+ * ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
+ * @q: request_queue of interest
+ * @gfp_mask: allocation mask
+ *
+ * Make sure an io_cq linking @ioc and @q exists.  If the icq doesn't
+ * exist, a new one is allocated and linked using @gfp_mask.
+ *
+ * The caller is responsible for ensuring @ioc won't go away and @q is
+ * alive and will stay alive until this function returns.
+ *
+ * Returns the existing or newly created icq, or %NULL on allocation
+ * failure.
+ */
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask)
+{
+	struct elevator_type *et = q->elevator->type;
+	struct io_cq *icq;
+
+	/* allocate stuff */
+	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
+				    q->node);
+	if (!icq)
+		return NULL;
+
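+	/*
+	 * Preload the radix tree now so that the insertion under the
+	 * spinlocks below cannot fail due to memory allocation.
+	 */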
+	if (radix_tree_preload(gfp_mask) < 0) {
+		kmem_cache_free(et->icq_cache, icq);
+		return NULL;
+	}
+
+	icq->ioc = ioc;
+	icq->q = q;
+	INIT_LIST_HEAD(&icq->q_node);
+	INIT_HLIST_NODE(&icq->ioc_node);
+
+	/* lock both q and ioc and try to link @icq */
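+	/*
+	 * q->queue_lock nests outside ioc->lock here; taking the outer
+	 * lock with the _irq variant keeps interrupts disabled across
+	 * both critical sections, so the inner lock can be a plain
+	 * spin_lock().
+	 */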
+	spin_lock_irq(q->queue_lock);
+	spin_lock(&ioc->lock);
+
+	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
+		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
+		list_add(&icq->q_node, &q->icq_list);
+		if (et->ops.elevator_init_icq_fn)
+			et->ops.elevator_init_icq_fn(icq);
+	} else {
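+		/*
+		 * The insert failed, most likely with -EEXIST: another
+		 * task linked an icq for this queue first.  Free ours
+		 * and return theirs via lookup.
+		 */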
+		kmem_cache_free(et->icq_cache, icq);
+		icq = ioc_lookup_icq(ioc, q);
+		if (!icq)
+			printk(KERN_ERR "cfq: icq link failed!\n");
+	}
+
+	spin_unlock(&ioc->lock);
+	spin_unlock_irq(q->queue_lock);
+	radix_tree_preload_end();
+	return icq;
+}
+
+static int __init blk_ioc_init(void)
+{
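+	/* SLAB_PANIC: cache creation failure panics, so no NULL check */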
+	iocontext_cachep = kmem_cache_create("blkdev_ioc",
+			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
+	return 0;
+}
+subsys_initcall(blk_ioc_init);
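
A minimal caller sketch (not part of the patch): how an elevator might
combine the two helpers. It assumes current->io_context is already set
up, and that, per the comment above, ioc_lookup_icq() is called with
q->queue_lock held while ioc_create_icq() takes the locks itself:

	static struct io_cq *get_icq(struct request_queue *q, gfp_t gfp_mask)
	{
		struct io_context *ioc = current->io_context;
		struct io_cq *icq;

		/* fast path: look up an existing icq under the queue lock */
		spin_lock_irq(q->queue_lock);
		icq = ioc_lookup_icq(ioc, q);
		spin_unlock_irq(q->queue_lock);

		/*
		 * Slow path: allocate and link a new icq.  If we race with
		 * another linker, this returns the winner's icq instead.
		 */
		if (!icq)
			icq = ioc_create_icq(ioc, q, gfp_mask);
		return icq;
	}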