|
@@ -774,3 +774,64 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
|
|
|
*
|
|
|
**/
|
|
|
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	/*
	 * Unconditionally overwrite the queue's DMA alignment mask.
	 * Callers that only want to raise the alignment should use
	 * blk_queue_update_dma_alignment() instead, which keeps the
	 * larger of the current and requested masks.
	 */
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
|
|
|
+
|
|
|
+/**
|
|
|
+ * blk_queue_update_dma_alignment - update dma length and memory alignment
|
|
|
+ * @q: the request queue for the device
|
|
|
+ * @mask: alignment mask
|
|
|
+ *
|
|
|
+ * description:
|
|
|
+ * update required memory and length alignment for direct dma transactions.
|
|
|
+ * If the requested alignment is larger than the current alignment, then
|
|
|
+ * the current queue alignment is updated to the new value, otherwise it
|
|
|
+ * is left alone. The design of this is to allow multiple objects
|
|
|
+ * (driver, device, transport etc) to set their respective
|
|
|
+ * alignments without having them interfere.
|
|
|
+ *
|
|
|
+ **/
|
|
|
+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
|
|
|
+{
|
|
|
+ BUG_ON(mask > PAGE_SIZE);
|
|
|
+
|
|
|
+ if (mask > q->dma_alignment)
|
|
|
+ q->dma_alignment = mask;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
|
|
|
+
|
|
|
/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q: the request queue for the device
 * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q.  If it supports
 * flushing, REQ_FLUSH should be set.  If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
	/* Warn (once) about any bits other than the two flush flags. */
	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

	/*
	 * FUA without FLUSH is inconsistent: a device that cannot flush
	 * its cache cannot honour forced-unit-access either.  Warn once
	 * and drop the FUA bit.
	 */
	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
		flush &= ~REQ_FUA;

	/* Store only the recognised flags, masking out anything else. */
	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
|
|
|
+
|
|
|
+void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
|
|
|
+{
|
|
|
+ q->flush_not_queueable = !queueable;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
|
|
|
+
|
|
|
/*
 * One-time initialisation of the block layer's PFN limits, derived from
 * the memory layout discovered at boot (max_low_pfn / max_pfn).
 * Registered as a subsys initcall so it runs before drivers probe.
 */
static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);
|