waterTankDataOperation.c

/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/* Exit an icq. Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}
/* Release an icq. Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock. If it's not pointing to @icq now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time the RCU callback runs,
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning. The ioc's are guaranteed to
	 * be different, use a different locking subclass here. Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}
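
/*
 * Lock-ordering note: paths such as ioc_clear_queue() take queue_lock
 * first and then ioc->lock, while ioc_release_fn() above must take
 * ioc->lock first to walk icq_list. Blocking on queue_lock while holding
 * ioc->lock could therefore deadlock against those paths, which is
 * presumably why the loop uses spin_trylock() and backs off (drop
 * ioc->lock, cpu_relax(), retake) instead of a plain nested spin_lock().
 */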
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock. Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
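
/*
 * Usage sketch (illustrative only; "holder" is a hypothetical structure,
 * not defined in this file): code that publishes an io_context beyond its
 * own reference is expected to pair the two exported helpers, roughly:
 *
 *	get_io_context(ioc);		take an extra reference first
 *	holder->ioc = ioc;
 *	...
 *	put_io_context(holder->ioc);	drop it when no longer needed
 */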
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active(). If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct hlist_node *n;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq. Perform
	 * reverse double locking. Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}
/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}
/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}
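
/*
 * Usage sketch (illustrative assumption, not taken from this file): a
 * caller tearing down a queue or switching elevators would hold the queue
 * lock around the call, e.g.:
 *
 *	spin_lock_irq(q->queue_lock);
 *	ioc_clear_queue(q);
 *	spin_unlock_irq(q->queue_lock);
 */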
/**
 * create_task_io_context - allocate and install an io_context for a task
 * @task: task to set up an io_context for
 * @gfp_flags: allocation flags
 * @node: NUMA node to allocate from
 *
 * Allocate a new io_context and try to install it on @task. Returns 0 on
 * success, -ENOMEM if allocation fails, and -EBUSY if installation is
 * refused because @task, which isn't %current, is exiting.
 */
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install. ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting. Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files(). The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}
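
/*
 * Usage sketch (illustrative assumption; the surrounding helper is
 * hypothetical and not part of this file): a lookup path would typically
 * fall back to create_task_io_context() when a task has no io_context yet,
 * along the lines of:
 *
 *	task_lock(task);
 *	ioc = task->io_context;
 *	task_unlock(task);
 *	if (!ioc && !create_task_io_context(task, gfp_flags, node))
 *		... retry the lookup ...
 */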