/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
 
/*
 * Walk every bio_vec in the bio chain and count the physical segments
 * the hardware will see, honouring the queue's clustering, max segment
 * size and segment boundary limits. Also records the size of the first
 * and last segments in bi_seg_front_size/bi_seg_back_size so that later
 * merge decisions can be made without rewalking the chain.
 */
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}
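
/*
 * Worked example (illustrative, not from the original source): with
 * clustering enabled and a queue_max_segment_size() of 64k, two 4k
 * bio_vecs sitting on physically adjacent pages collapse into a single
 * 8k physical segment; the same two vectors on non-adjacent pages, or
 * straddling the queue's segment boundary mask, count as two segments.
 */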
 
void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}
 
/*
 * Recount the physical segments of a single bio. bi_next is cleared
 * temporarily so __blk_recalc_rq_segments() walks only this bio, and
 * BIO_SEG_VALID is set so the count is not redone until it goes stale.
 */
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
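
/*
 * Typical caller pattern (a sketch of how the block layer's merge code
 * uses this, not code from this excerpt): recount only when the cached
 * segment count is stale.
 *
 *	if (!bio_flagged(bio, BIO_SEG_VALID))
 *		blk_recount_segments(q, bio);
 */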
 
/*
 * Check whether the last segment of @bio can be physically merged with
 * the first segment of @nxt without violating the queue's clustering,
 * max segment size and segment boundary limits.
 */
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}
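
/*
 * Note: this helper backs the request-merge path. When two requests are
 * joined back to back, ll_merge_requests_fn() (further down in
 * blk-merge.c, outside this excerpt) uses it to drop the combined
 * segment count by one if the tail of the first request and the head of
 * the second are mergeable.
 */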
 
/*
 * Add one bio_vec to the scatterlist being built: either grow the
 * current sg entry (when clustering and the queue limits allow it) or
 * start a new one. State is carried across calls in *bvprv, *sg and
 * *nsegs.
 */
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec **bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*bvprv && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			/* 0x02 is the sg end marker bit set by sg_mark_end() */
			(*sg)->page_link &= ~0x02;
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = bvec;
}
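
/*
 * Illustration of the termination-bit dance above (a hypothetical
 * driver-side sequence, not code from this file):
 *
 *	sg_init_table(sgl, 8);
 *	blk_rq_map_sg(q, rq_a, sgl);	// 2 entries; end mark on sgl[1]
 *	blk_rq_map_sg(q, rq_b, sgl);	// 4 entries; the stale end mark
 *					// on sgl[1] must be cleared while
 *					// walking past it
 */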
 
/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in rq */

	/* extend the last sg entry so the total honours dma_pad_mask */
	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	/*
	 * Some devices need a trailing "drain" buffer; append it as one
	 * more sg entry when the driver's dma_drain_needed() asks for it.
	 */
	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
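
/*
 * Usage sketch (illustrative; "sgl", "nents", "dev" and "dir" are the
 * driver's own storage and arguments, not names from this file): a
 * driver sizes its table, maps the request, then hands the result to
 * the DMA layer.
 *
 *	sg_init_table(sgl, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	nents = dma_map_sg(dev, sgl, nents, dir);
 */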