/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;
 
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
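
/*
 * Usage sketch (hypothetical caller, for illustration): a reference
 * taken with get_io_context() is paired with put_io_context(), which
 * frees the ioc once the last reference is dropped:
 *
 *	get_io_context(ioc);
 *	... hand @ioc to a request or another context ...
 *	put_io_context(ioc);
 */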
 
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}
 
/* Exit an icq.  Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}
 
/* Release an icq.  Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to @icq and clearing it from @icq
	 * are done under queue_lock.  If the hint isn't pointing to @icq
	 * now, it never will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time the RCU callback runs,
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
 
/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}
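
/*
 * Note on the trylock dance above: icq creation and ioc_clear_queue()
 * nest ioc->lock inside q->queue_lock, while the release path walks from
 * the ioc side and has to take ioc->lock first.  Taking q->queue_lock
 * unconditionally here would invert that lock order, so the code
 * trylocks and backs off instead of risking an ABBA deadlock.
 */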
 
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
 
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct hlist_node *n;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}
 
/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}
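
/*
 * exit_io_context() is expected to run from the task exit path (e.g.
 * do_exit()), after which @task must not issue further IO: it detaches
 * the ioc from @task and drops both the task count and the active
 * reference, notifying the ioscheds via put_io_context_active().
 */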
 
/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}
 
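/**
 * create_task_io_context - allocate and install an io_context
 * @task: task to attach the new io_context to
 * @gfp_flags: allocation flags
 * @node: allocation node
 *
 * Allocate a new io_context, initialize it and try to install it on
 * @task.  Returns 0 if @task ends up with an io_context (either the new
 * one or one installed by a racing thread), -ENOMEM if allocation fails
 * and -EBUSY if @task is exiting and isn't %current.
 */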
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}
 
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
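
/*
 * Usage sketch (hypothetical caller): look up or create another task's
 * io_context and drop the reference when done.  NUMA_NO_NODE leaves the
 * allocation node unconstrained.
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ioc) {
 *		... inspect or link @ioc ...
 *		put_io_context(ioc);
 *	}
 */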
 
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
 
 