@@ -75,3 +75,200 @@ static void ioc_destroy_icq(struct io_cq *icq)
 	ioc_exit_icq(icq);
 
 	/*
+	 * @icq->q might have gone away by the time RCU callback runs
+	 * making it impossible to determine icq_cache. Record it in @icq.
+	 */
+	icq->__rcu_icq_cache = et->icq_cache;
+	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
+}
+
+/*
+ * Slow path for ioc release in put_io_context(). Performs double-lock
+ * dancing to unlink all icq's and then frees ioc.
+ */
+static void ioc_release_fn(struct work_struct *work)
+{
+	struct io_context *ioc = container_of(work, struct io_context,
+					      release_work);
+	unsigned long flags;
+
+	/*
+	 * Exiting icq may call into put_io_context() through elevator
+	 * which will trigger lockdep warning. The ioc's are guaranteed to
+	 * be different, use a different locking subclass here. Use
+	 * irqsave variant as there's no spin_lock_irq_nested().
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+
+	while (!hlist_empty(&ioc->icq_list)) {
+		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
+						struct io_cq, ioc_node);
+		struct request_queue *q = icq->q;
+
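+		/*
+		 * Destroying an icq needs both the ioc and q locks, but
+		 * q lock nests outside ioc lock on the regular path.
+		 * Trylock the q lock; on contention, drop ioc->lock so
+		 * the other side can make progress, then retry.
+		 */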
+		if (spin_trylock(q->queue_lock)) {
+			ioc_destroy_icq(icq);
+			spin_unlock(q->queue_lock);
+		} else {
+			spin_unlock_irqrestore(&ioc->lock, flags);
+			cpu_relax();
+			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+		}
+	}
+
+	spin_unlock_irqrestore(&ioc->lock, flags);
+
+	kmem_cache_free(iocontext_cachep, ioc);
+}
+
+/**
+ * put_io_context - put a reference of io_context
+ * @ioc: io_context to put
+ *
+ * Decrement reference count of @ioc and release it if the count reaches
+ * zero.
+ */
+void put_io_context(struct io_context *ioc)
+{
+	unsigned long flags;
+	bool free_ioc = false;
+
+	if (ioc == NULL)
+		return;
+
+	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+
+	/*
+	 * Releasing ioc requires reverse order double locking and we may
+	 * already be holding a queue_lock. Do it asynchronously from wq.
+	 */
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
+		spin_lock_irqsave(&ioc->lock, flags);
+		if (!hlist_empty(&ioc->icq_list))
+			schedule_work(&ioc->release_work);
+		else
+			free_ioc = true;
+		spin_unlock_irqrestore(&ioc->lock, flags);
+	}
+
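+	/* refcount hit zero with no icq's attached; safe to free outside ->lock */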
+	if (free_ioc)
+		kmem_cache_free(iocontext_cachep, ioc);
+}
+EXPORT_SYMBOL(put_io_context);
+
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active(). If active reference reaches zero after
+ * put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
+{
+	struct hlist_node *n;
+	unsigned long flags;
+	struct io_cq *icq;
+
+	if (!atomic_dec_and_test(&ioc->active_ref)) {
+		put_io_context(ioc);
+		return;
+	}
+
+	/*
+	 * Need ioc lock to walk icq_list and q lock to exit icq. Perform
+	 * reverse double locking. Read comment in ioc_release_fn() for
+	 * explanation on the nested locking annotation.
+	 */
+retry:
+	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
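+		/*
+		 * Every retry restarts the walk from the head; skipping
+		 * icq's that have already exited is what guarantees
+		 * forward progress.
+		 */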
+		if (icq->flags & ICQ_EXITED)
+			continue;
+		if (spin_trylock(icq->q->queue_lock)) {
+			ioc_exit_icq(icq);
+			spin_unlock(icq->q->queue_lock);
+		} else {
+			spin_unlock_irqrestore(&ioc->lock, flags);
+			cpu_relax();
+			goto retry;
+		}
+	}
+	spin_unlock_irqrestore(&ioc->lock, flags);
+
+	put_io_context(ioc);
+}
+
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+	struct io_context *ioc;
+
+	task_lock(task);
+	ioc = task->io_context;
+	task->io_context = NULL;
+	task_unlock(task);
+
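+	/* drop the task count and the active reference the task was holding */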
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context_active(ioc);
+}
+
+/**
+ * ioc_clear_queue - break any ioc association with the specified queue
+ * @q: request_queue being cleared
+ *
+ * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
+ */
+void ioc_clear_queue(struct request_queue *q)
+{
+	lockdep_assert_held(q->queue_lock);
+
+	while (!list_empty(&q->icq_list)) {
+		struct io_cq *icq = list_entry(q->icq_list.next,
+					       struct io_cq, q_node);
+		struct io_context *ioc = icq->ioc;
+
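+		/* queue_lock is already held, so ioc->lock nests inside in the regular order */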
+		spin_lock(&ioc->lock);
+		ioc_destroy_icq(icq);
+		spin_unlock(&ioc->lock);
+	}
+}
+
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
+{
+	struct io_context *ioc;
+	int ret;
+
+	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
+				    node);
+	if (unlikely(!ioc))
+		return -ENOMEM;
+
+	/* initialize */
+	atomic_long_set(&ioc->refcount, 1);
+	atomic_set(&ioc->nr_tasks, 1);
+	atomic_set(&ioc->active_ref, 1);
+	spin_lock_init(&ioc->lock);
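+	/* icq's are inserted into the tree under spinlocks, hence the atomic gfp mask */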
+	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
+	INIT_HLIST_HEAD(&ioc->icq_list);
+	INIT_WORK(&ioc->release_work, ioc_release_fn);
+
+	/*
+	 * Try to install. ioc shouldn't be installed if someone else
+	 * already did or @task, which isn't %current, is exiting. Note
+	 * that we need to allow ioc creation on exiting %current as exit
+	 * path may issue IOs from e.g. exit_files(). The exit path is
+	 * responsible for not issuing IO after exit_io_context().
+	 */
+	task_lock(task);
+	if (!task->io_context &&
+	    (task == current || !(task->flags & PF_EXITING)))
+		task->io_context = ioc;
+	else
+		kmem_cache_free(iocontext_cachep, ioc);
+
+	ret = task->io_context ? 0 : -EBUSY;
+
+	task_unlock(task);
+
+	return ret;
+}
+