@@ -0,0 +1,132 @@
+#ifndef __ASM_AVR32_DMA_MAPPING_H
+#define __ASM_AVR32_DMA_MAPPING_H
+
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	int direction);
+
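A minimal usage sketch, not part of the patch itself: dma_cache_sync() lets a driver make a kernel-virtual buffer it owns visible to (or from) the device by writing back or invalidating the covering cache lines. The function and variable names (my_push_to_device, my_dev, my_buf) are made up for illustration.

#include <linux/dma-mapping.h>

/* Hypothetical helper: flush CPU writes to my_buf before the device reads it. */
static void my_push_to_device(struct device *my_dev, void *my_buf, size_t len)
{
	/* DMA_TO_DEVICE: write dirty cache lines covering my_buf back to RAM */
	dma_cache_sync(my_dev, my_buf, len, DMA_TO_DEVICE);

	/* ... kick off the device transfer here ... */
}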
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	/* Fix when needed. I really don't know of any limitations */
+	return 1;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+	return 0;
+}
+
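A minimal sketch, not part of the patch: a driver's probe path would typically negotiate its DMA mask through dma_set_mask() before doing any mapping, using the 24-bit mask from the comment above as an example. my_probe and my_dev are made-up names.

#include <linux/dma-mapping.h>

static int my_probe(struct device *my_dev)
{
	int ret;

	/* hypothetical device that can only drive the low 24 address bits */
	ret = dma_set_mask(my_dev, 0x00ffffffULL);
	if (ret)
		return ret;	/* -EIO: the mask cannot be supported */

	/* ... continue with device setup ... */
	return 0;
}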
+/*
+ * dma_map_single can't fail as it is implemented now.
+ */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
+{
+	return 0;
+}
+
+/**
+ * dma_alloc_coherent - allocate consistent memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
+ *
+ * Allocate some uncached, unbuffered memory for a device for
+ * performing DMA.  This function allocates pages, and will
+ * return the CPU-viewed address, and sets @handle to be the
+ * device-viewed address.
+ */
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+				dma_addr_t *handle, gfp_t gfp);
+
+/**
+ * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: size of memory originally requested in dma_alloc_coherent
+ * @cpu_addr: CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ *
+ * Free (and unmap) a DMA buffer previously allocated by
+ * dma_alloc_coherent().
+ *
+ * References to the memory and mappings associated with cpu_addr/handle
+ * are illegal during and after this call executes.
+ */
+extern void dma_free_coherent(struct device *dev, size_t size,
+			      void *cpu_addr, dma_addr_t handle);
+
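A minimal sketch, not part of the patch, of how the two declarations above pair up: allocate a coherent descriptor area once, hand the device-view address (the handle) to the hardware, and release both views together on teardown. RING_BYTES, my_ring and my_ring_dma are made-up names.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define RING_BYTES	4096	/* hypothetical descriptor ring size */

static void *my_ring;		/* CPU-view address */
static dma_addr_t my_ring_dma;	/* device-view address */

static int my_ring_alloc(struct device *dev)
{
	my_ring = dma_alloc_coherent(dev, RING_BYTES, &my_ring_dma, GFP_KERNEL);
	if (!my_ring)
		return -ENOMEM;

	/* program my_ring_dma into the device's base address register here */
	return 0;
}

static void my_ring_free(struct device *dev)
{
	dma_free_coherent(dev, RING_BYTES, my_ring, my_ring_dma);
}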
+/**
+ * dma_alloc_writecombine - allocate write-combining memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
+ *
+ * Allocate some uncached, buffered memory for a device for
+ * performing DMA.  This function allocates pages, and will
+ * return the CPU-viewed address, and sets @handle to be the
+ * device-viewed address.
+ */
+extern void *dma_alloc_writecombine(struct device *dev, size_t size,
+				    dma_addr_t *handle, gfp_t gfp);
+
+/**
+ * dma_free_writecombine - free memory allocated by dma_alloc_writecombine
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: size of memory originally requested in dma_alloc_writecombine
+ * @cpu_addr: CPU-view address returned from dma_alloc_writecombine
+ * @handle: device-view address returned from dma_alloc_writecombine
+ *
+ * Free (and unmap) a DMA buffer previously allocated by
+ * dma_alloc_writecombine().
+ *
+ * References to the memory and mappings associated with cpu_addr/handle
+ * are illegal during and after this call executes.
+ */
+extern void dma_free_writecombine(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t handle);
+
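A minimal sketch, not part of the patch: the write-combining variant is typically used for buffers the CPU fills with long sequential writes, such as a frame buffer, where buffered (write-combined) stores are cheaper than fully uncached ones. FB_BYTES and the my_fb_* names are made up.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define FB_BYTES	(640 * 480 * 2)	/* hypothetical 16bpp frame buffer */

static void *my_fb_virt;	/* CPU-view address, write-combined */
static dma_addr_t my_fb_dma;	/* device-view address */

static int my_fb_alloc(struct device *dev)
{
	my_fb_virt = dma_alloc_writecombine(dev, FB_BYTES, &my_fb_dma, GFP_KERNEL);
	return my_fb_virt ? 0 : -ENOMEM;
}

static void my_fb_free(struct device *dev)
{
	dma_free_writecombine(dev, FB_BYTES, my_fb_virt, my_fb_dma);
}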
+/**
+ * dma_map_single - map a single buffer for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @cpu_addr: CPU direct mapped address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_single() or dma_sync_single().
+ */
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+	       enum dma_data_direction direction)
+{
+	dma_cache_sync(dev, cpu_addr, size, direction);
+	return virt_to_bus(cpu_addr);
+}
+
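A minimal sketch, not part of the patch, of the streaming usage dma_map_single() implies: map a driver-owned buffer for a device read, check the mapping (a no-op on this architecture, but it keeps callers portable), and unmap once the transfer has completed so the CPU regains ownership. my_start_tx and its arguments are made-up names.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int my_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* writes the cache back and hands ownership of buf to the device */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... tell the device to transmit len bytes from 'handle' ... */

	/* after the transfer completes, give the buffer back to the CPU */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}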
+/**
+ * dma_unmap_single - unmap a single buffer previously mapped
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to unmap
+ * @dir: DMA transfer direction
+ *
+ * Unmap a single streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_single() call.