preliminaryDataProcessing.c

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(cpu, part);
		part_inc_in_flight(part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
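
/*
 * Worked example for the thresholds above, assuming the default nr_requests
 * of 128 (BLKDEV_MAX_RQ): the queue is flagged congested once
 * 128 - 128/8 + 1 = 113 requests are in use, and only unflagged again when
 * usage falls to 128 - 128/8 - 128/16 - 1 = 103. The gap between the two
 * values provides hysteresis, so the congested state does not toggle on
 * every allocation and free around a single threshold.
 */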
/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;

	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
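
/*
 * A minimal usage sketch for blk_get_backing_dev_info() (hypothetical
 * caller, not part of this file): the return value must be checked, since
 * a device whose request queue cannot be located yields NULL.
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *
 *	if (bdi)
 *		pr_info("read-ahead window: %lu pages\n", bdi->ra_pages);
 */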
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(nbytes > bio->bi_size)) {
		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
		       __func__, nbytes, bio->bi_size);
		nbytes = bio->bi_size;
	}

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio->bi_size -= nbytes;
	bio->bi_sector += (nbytes >> 9);

	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}
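
/*
 * Partial-completion arithmetic in req_bio_endio(), as a worked example
 * (illustrative numbers only): completing nbytes = 4096 bytes shrinks
 * bi_size by 4096 and advances bi_sector by 4096 >> 9 = 8, i.e. eight
 * 512-byte sectors, so the bio afterwards describes only the portion of
 * the transfer that is still outstanding.
 */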
void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
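
/*
 * A minimal usage sketch for blk_delay_queue() (hypothetical request_fn;
 * example_hw_slot_available() and example_dispatch() are assumed driver
 * helpers, not part of this file): a driver that temporarily runs out of
 * hardware resources can requeue the request and ask for the queue to be
 * re-run a few milliseconds later instead of busy-looping. The queue lock
 * is already held when ->request_fn is invoked.
 *
 *	static void example_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (!example_hw_slot_available()) {
 *				blk_requeue_request(q, rq);
 *				blk_delay_queue(q, 3);
 *				break;
 *			}
 *			example_dispatch(rq);
 *		}
 *	}
 */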
/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *    blk_start_queue() will clear the stop flag on the queue, and call
 *    the request_fn for the queue if it was in a stopped state when
 *    entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *    The Linux block layer assumes that a block driver will consume all
 *    entries on the request queue when the request_fn strategy is called.
 *    Often this will not happen, because of hardware limitations (queue
 *    depth settings). If a device driver gets a 'queue full' response,
 *    or if it simply chooses not to queue more I/O at one point, it can
 *    call this function to prevent the request_fn from being called until
 *    the driver has signalled it's ready to go again. This happens by calling
 *    blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
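
/*
 * A minimal usage sketch of the stop/start pairing described above
 * (hypothetical driver code; example_controller_full() is an assumed
 * helper, not part of this file): the strategy function stops the queue on
 * a 'queue full' condition, and the completion path restarts it once a
 * slot frees up. Both calls are made with the queue lock held, as required.
 *
 *	static void example_request_fn(struct request_queue *q)
 *	{
 *		if (example_controller_full()) {
 *			blk_stop_queue(q);
 *			return;
 *		}
 *		...
 *	}
 *
 *	static void example_irq_completion(struct request_queue *q)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(q->queue_lock, flags);
 *		blk_start_queue(q);
 *		spin_unlock_irqrestore(q->queue_lock, flags);
 *	}
 */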
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);
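
/*
 * A minimal teardown sketch for blk_sync_queue() (hypothetical driver code;
 * example_free_driver_state() is an assumed helper, not part of this file):
 * the driver first stops the queue so no new delay work gets scheduled,
 * then waits for the timeout timer and any pending delay work to finish
 * before releasing the resources those callbacks might touch.
 *
 *	spin_lock_irq(q->queue_lock);
 *	blk_stop_queue(q);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	blk_sync_queue(q);
 *	example_free_driver_state(q->queuedata);
 */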
/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))