@@ -679,3 +679,98 @@ EXPORT_SYMBOL(disk_stack_limits);
 /**
  * blk_queue_dma_pad - set pad mask
  * @q: the request queue for the device
+ * @mask: pad mask
+ *
+ * Set dma pad mask.
+ *
+ * Appending pad buffer to a request modifies the last entry of a
+ * scatter list such that it includes the pad buffer.
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+	q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
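
The padding described in the kernel-doc above amounts to rounding the request length up to a multiple of (dma_pad_mask + 1) and growing the final scatterlist entry by the difference. A minimal sketch of how an sg-mapping path might apply the mask; the helper name is hypothetical and not part of this patch:

static void pad_last_sg_entry(struct request_queue *q,
			      struct scatterlist *last_sg,
			      unsigned int total_len)
{
	if (total_len & q->dma_pad_mask) {
		/* bytes needed to round total_len up to (dma_pad_mask + 1) */
		unsigned int pad_len = (q->dma_pad_mask & ~total_len) + 1;

		last_sg->length += pad_len;
	}
}
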
+/**
+ * blk_queue_update_dma_pad - update pad mask
+ * @q: the request queue for the device
+ * @mask: pad mask
+ *
+ * Update dma pad mask.
+ *
+ * Appending pad buffer to a request modifies the last entry of a
+ * scatter list such that it includes the pad buffer.
+ **/
+void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
+{
+	if (mask > q->dma_pad_mask)
+		q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_update_dma_pad);
+
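
A usage sketch (hypothetical driver code, not part of this patch): a driver that needs transfers padded to at least 4 bytes can raise the queue's pad mask without lowering a stricter mask that may already be in effect:

static void foo_configure_queue(struct request_queue *q)
{
	/* pad transfer lengths up to a 4-byte multiple; a larger pad
	 * mask already set on the queue is left untouched */
	blk_queue_update_dma_pad(q, 4 - 1);
}
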
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * @q: the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
+ * @buf: physically contiguous buffer
+ * @size: size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer. They have to have a
+ * real area of memory to transfer it into. The use case for this is
+ * ATAPI devices in DMA mode. If the packet command causes a transfer
+ * bigger than the transfer size, some HBAs will lock up if there
+ * aren't DMA elements to contain the excess transfer. What this API
+ * does is adjust the queue so that the buf is always appended
+ * silently to the scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for appending
+ * the drain buffer. If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support, otherwise there won't be room for the drain buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q,
+			dma_drain_needed_fn *dma_drain_needed,
+			void *buf, unsigned int size)
+{
+	if (queue_max_segments(q) < 2)
+		return -EINVAL;
+	/* make room for appending the drain */
+	blk_queue_max_segments(q, queue_max_segments(q) - 1);
+	q->dma_drain_needed = dma_drain_needed;
+	q->dma_drain_buffer = buf;
+	q->dma_drain_size = size;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
+
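
A hedged sketch of how an ATAPI-style driver might wire this up at queue init time; FOO_MAX_DRAIN and foo_cmd_may_overrun() are hypothetical names, not part of this patch:

#define FOO_MAX_DRAIN	4096	/* hypothetical worst-case excess, in bytes */

static int foo_drain_needed(struct request *rq)
{
	/* non-zero for commands that may transfer more than requested */
	return foo_cmd_may_overrun(rq);	/* hypothetical helper */
}

static int foo_init_queue(struct request_queue *q)
{
	void *buf = kmalloc(FOO_MAX_DRAIN, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* reserves one segment and registers the drain buffer */
	return blk_queue_dma_drain(q, foo_drain_needed, buf, FOO_MAX_DRAIN);
}
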
+/**
+ * blk_queue_segment_boundary - set boundary rules for segment merging
+ * @q: the request queue for the device
+ * @mask: the memory boundary mask
+ **/
+void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
+{
+	if (mask < PAGE_CACHE_SIZE - 1) {
+		mask = PAGE_CACHE_SIZE - 1;
+		printk(KERN_INFO "%s: set to minimum %lx\n",
+		       __func__, mask);
+	}
+
+	q->limits.seg_boundary_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_segment_boundary);
+
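
For illustration only (hypothetical driver code): a controller whose DMA engine cannot cross 64KB boundaries within a single segment would express that limit as a mask of 0xffff:

static void foo_set_boundary(struct request_queue *q)
{
	/* no segment may straddle a 64KB boundary */
	blk_queue_segment_boundary(q, 0xffffUL);
}
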
+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q: the request queue for the device
+ * @mask: alignment mask
+ *
+ * Description:
+ *    Set required memory and length alignment for direct dma transactions.
+ *    This is used when building direct io requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(struct request_queue *q, int mask)