/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100
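
/*
 * blkcg_pol_mutex presumably serializes blkcg policy registration and
 * per-queue activation. blkcg_policy[] holds the registered policies,
 * indexed by ->plid; blkcg_policy_enabled() below additionally checks the
 * per-queue blkcg_pols bitmap, so per-policy data is only allocated for
 * policies that are actually activated on a given queue.
 */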
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd = blkg->pd[i];

                if (!pd)
                        continue;

                if (pol && pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(pd);
        }

        blk_exit_rl(&blkg->rl);
        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;

        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
                if (blk_init_rl(&blkg->rl, q, gfp_mask))
                        goto err_free;
                blkg->rl.blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
                if (!pd)
                        goto err_free;

                blkg->pd[i] = pd;
                pd->blkg = blkg;

                /* invoke per-policy init */
                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }

        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}
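
/*
 * Note on blkg_alloc()/blkg_free(): blkg_alloc() can fail part way through
 * (base allocation, request_list init, or any per-policy pd allocation),
 * and its err_free path relies on blkg_free() tolerating a partially
 * constructed blkg - pd[] slots that were never allocated are skipped and
 * pd_exit_fn is only invoked when the policy is present. A NULL return
 * therefore always means allocation failure; the lookup/create path (not
 * shown in this excerpt) is presumably responsible for linking the new
 * blkg into the blkcg and the queue.
 */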

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                      struct request_queue *q)