@@ -128,3 +128,100 @@ void blk_set_default_limits(struct queue_limits *lim)
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
+/**
+ * blk_set_stacking_limits - set default limits for stacking devices
+ * @lim:  the queue_limits structure to reset
+ *
+ * Description:
+ *   Returns a queue_limit struct to its default state. Should be used
+ *   by stacking drivers like DM that have no internal limits.
+ */
+void blk_set_stacking_limits(struct queue_limits *lim)
+{
+	blk_set_default_limits(lim);
+
+	/* Inherit limits from component devices */
+	lim->discard_zeroes_data = 1;
+	lim->max_segments = USHRT_MAX;
+	lim->max_hw_sectors = UINT_MAX;
+	lim->max_sectors = UINT_MAX;
+	lim->max_write_same_sectors = UINT_MAX;
+}
+EXPORT_SYMBOL(blk_set_stacking_limits);
+
+/**
+ * blk_queue_make_request - define an alternate make_request function for a device
+ * @q:  the request queue for the device to be affected
+ * @mfn: the alternate make_request function
+ *
+ * Description:
+ *    The normal way for &struct bios to be passed to a device
+ *    driver is for them to be collected into requests on a request
+ *    queue, and then to allow the device driver to select requests
+ *    off that queue when it is ready.  This works well for many block
+ *    devices. However some block devices (typically virtual devices
+ *    such as md or lvm) do not benefit from the processing on the
+ *    request queue, and are served best by having the requests passed
+ *    directly to them.  This can be achieved by providing a function
+ *    to blk_queue_make_request().
+ *
+ * Caveat:
+ *    The driver that does this *must* be able to deal appropriately
+ *    with buffers in "highmemory". This can be accomplished by either calling
+ *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
+ *    blk_queue_bounce() to create a buffer in normal memory.
+ **/
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
+{
+	/*
+	 * set defaults
+	 */
+	q->nr_requests = BLKDEV_MAX_RQ;
+
+	q->make_request_fn = mfn;
+	blk_queue_dma_alignment(q, 511);
+	blk_queue_congestion_threshold(q);
+	q->nr_batching = BLK_BATCH_REQ;
+
+	blk_set_default_limits(&q->limits);
+
+	/*
+	 * by default assume old behaviour and bounce for any highmem page
+	 */
+	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+}
+EXPORT_SYMBOL(blk_queue_make_request);
+
+/**
+ * blk_queue_bounce_limit - set bounce buffer limit for queue
+ * @q: the request queue for the device
+ * @dma_mask: the maximum address the device can handle
+ *
+ * Description:
+ *    Different hardware can have different requirements as to what pages
+ *    it can do I/O directly to. A low level driver can call
+ *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
+ *    buffers for doing I/O to pages residing above @dma_mask.
+ **/
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
+{
+	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
+	int dma = 0;
+
+	q->bounce_gfp = GFP_NOIO;
+#if BITS_PER_LONG == 64
+	/*
+	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
+	 * some IOMMUs can handle everything, but I don't know of a
+	 * way to test this here.
+	 */
+	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+		dma = 1;
+	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
+#else
+	if (b_pfn < blk_max_low_pfn)
+		dma = 1;
+	q->limits.bounce_pfn = b_pfn;
+#endif
+	if (dma) {
+		init_emergency_isa_pool();
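
As a rough illustration of how the exports above fit together, here is a minimal sketch of a bio-based (stacking) driver's queue setup. It assumes a kernel of the same vintage as this patch, where make_request_fn returns void and a bio carries bi_bdev; the stk_* names, the global backing-device pointer, and the pass-through remapping are hypothetical, not part of the patch.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Backing device this hypothetical driver stacks on; opened elsewhere. */
static struct block_device *stk_backing;

/* Bypass the request queue: forward each bio to the device below. */
static void stk_make_request(struct request_queue *q, struct bio *bio)
{
	bio->bi_bdev = stk_backing;	/* remap; the sector is left unchanged */
	generic_make_request(bio);
}

static struct request_queue *stk_alloc_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (!q)
		return NULL;
	/* Installs the bypass function and applies the conservative
	 * defaults, including BLK_BOUNCE_HIGH bouncing. */
	blk_queue_make_request(q, stk_make_request);
	/* Loosen the limits again: the real constraints come from the
	 * component devices and are merged in by the stacking code. */
	blk_set_stacking_limits(&q->limits);
	return q;
}

A driver with genuine DMA addressing constraints would instead override the BLK_BOUNCE_HIGH default set by blk_queue_make_request(), e.g. blk_queue_bounce_limit(q, BLK_BOUNCE_ISA) for hardware that can only reach ISA-addressable memory.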