/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/* Exit an icq. Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/* Release an icq.  Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*