/* DMA Proxy
*
* This module is designed to be a small example of a DMA device driver that is
* a client to the DMA Engine using the AXI DMA driver. It serves as a proxy for
* kernel space DMA control to a user space application.
*
* A zero copy scheme is provided by allowing user space to mmap a kernel allocated
* memory region into user space, referred to as a proxy channel interface. The
* ioctl function is provided to start a DMA transfer which then blocks until the
* transfer is complete. No input arguments are being used in the ioctl function.
*
* There is an associated user space application, dma_proxy_test.c, and dma_proxy.h
* that work with this device driver.
*
* The hardware design was tested with an AXI DMA without scatter gather and
* with the transmit channel looped back to the receive channel.
*
*/
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/workqueue.h>
#include "dma_proxy.h"
#define CACHED_BUFFERS
// #define INTERNAL_TEST
/* The following module parameter controls whether the allocated interface memory area is cached or
 * not, so that both options can be illustrated. Add cached_buffers=1 to the insmod command line
 * to cause the allocated memory to be cached.
 */
/* Module parameter: non-zero selects cached (streaming) buffers with explicit
 * map/unmap; zero selects the coherent-memory path. Read-only via sysfs.
 * Note: the variable is unsigned, so the module_param type must be "uint" --
 * moduleparam.h's type check rejects an "int" declaration for an unsigned
 * variable on modern kernels.
 */
static unsigned cached_buffers = 0;
module_param(cached_buffers, uint, S_IRUGO);
MODULE_LICENSE("GPL");
#define DRIVER_NAME "dma_proxy"
#define CHANNEL_COUNT 2
#define ERROR -1
#define NOT_LAST_CHANNEL 0
#define LAST_CHANNEL 1
/* The following data structure represents a single channel of DMA, transmit or receive in the case
* when using AXI DMA. It contains all the data to be maintained for the channel.
*/
struct dma_proxy_channel {
struct dma_proxy_channel_interface *interface_p; /* user to kernel space interface */
dma_addr_t interface_phys_addr;
struct device *proxy_device_p; /* character device support */
struct device *dma_device_p;
dev_t dev_node;
struct cdev cdev;
struct class *class_p;
struct dma_chan *channel_p; /* dma support */
struct completion cmp;
dma_cookie_t cookie;
dma_addr_t dma_handle;
u32 direction; /* DMA_MEM_TO_DEV or DMA_DEV_TO_MEM */
};
/* Allocate the channels for this example statically rather than dynamically for simplicity.
*/
static struct dma_proxy_channel channels[CHANNEL_COUNT];
/* DMA engine completion callback.
 *
 * Runs when the hardware finishes the transfer; it signals the completion
 * object so the thread blocked in wait_for_transfer() can resume. The
 * transfer status itself is checked by the waiter, not here.
 */
static void sync_callback(void *completion)
{
	/* Wake up whoever is waiting on this transfer. */
	complete(completion);
}
/* Prepare a single-buffer slave DMA descriptor for the channel, submit it to
 * the DMA engine so it is queued, and start the engine.
 *
 * Returns the cookie that tracks the status of the transaction, or -EBUSY
 * when the descriptor could not be prepared.
 */
static dma_cookie_t start_transfer(struct dma_proxy_channel *pchannel_p)
{
	struct dma_proxy_channel_interface *interface_p = pchannel_p->interface_p;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Only a single buffer is used per transfer, so one slave descriptor
	 * covers the whole transaction.
	 */
	desc = dmaengine_prep_slave_single(pchannel_p->channel_p,
					   pchannel_p->dma_handle,
					   interface_p->length,
					   pchannel_p->direction, flags);
	if (!desc) {
		printk(KERN_ERR "dmaengine_prep_slave_single error\n");
		return -EBUSY;
	}

	desc->callback = sync_callback;
	desc->callback_param = &pchannel_p->cmp;

	/* Arm the completion before the descriptor is submitted so the
	 * callback cannot fire against an uninitialized completion, then
	 * queue the transaction and kick the engine.
	 */
	init_completion(&pchannel_p->cmp);
	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(pchannel_p->channel_p);

	return cookie;
}
/* Block until a previously submitted DMA transfer completes, times out
 * (3 seconds) or errors, recording the outcome in the interface status
 * field (PROXY_NO_ERROR / PROXY_TIMEOUT / PROXY_ERROR).
 *
 * For cached buffers the streaming mapping is also torn down here, so the
 * CPU sees data the device wrote; doing it in this function keeps that step
 * with the wait when callers run the wait from a separate thread.
 */
static void wait_for_transfer(struct dma_proxy_channel *pchannel_p)
{
	unsigned long remaining = msecs_to_jiffies(3000);
	enum dma_status status;

	pchannel_p->interface_p->status = PROXY_BUSY;

	/* Wait for completion, a timeout, or an error from the engine. */
	remaining = wait_for_completion_timeout(&pchannel_p->cmp, remaining);
	status = dma_async_is_tx_complete(pchannel_p->channel_p,
					  pchannel_p->cookie, NULL, NULL);

	if (remaining == 0) {
		pchannel_p->interface_p->status = PROXY_TIMEOUT;
		printk(KERN_ERR "DMA timed out\n");
	} else if (status != DMA_COMPLETE) {
		pchannel_p->interface_p->status = PROXY_ERROR;
		printk(KERN_ERR "DMA returned completion callback status of: %s\n",
		       status == DMA_ERROR ? "error" : "in progress");
	} else {
		pchannel_p->interface_p->status = PROXY_NO_ERROR;
	}

	if (cached_buffers) {
		/* Unmap the streaming buffer now that the hardware is done
		 * with it so the CPU can observe received data.
		 */
		u32 map_direction =
			(pchannel_p->direction == DMA_MEM_TO_DEV) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE;

		dma_unmap_single(pchannel_p->dma_device_p,
				 pchannel_p->dma_handle,
				 pchannel_p->interface_p->length,
				 map_direction);
	}
}
/* For debug only, print out the channel details
*/
void print_channel(struct dma_proxy_channel *pchannel_p)
{
struct dma_proxy_channel_interface *interface_p = pchannel_p->interface_p;
printk("length = %d ", interface_p->length);
if (pchannel_p->direction == DMA_MEM_TO_DEV)
printk("tx direction ");
else
printk("rx direction ");
}
/* Setup the DMA transfer for the channel by taking care of any cache operations
* and the start it.
*/
static void transfer(struct dma_proxy_channel *pchannel_p)
{
struct dma_proxy_channel_interface *interface_p = pchannel_p->interface_p;
u32 map_direction;
print_channel(pchannel_p);
if (cached_buffers) {
/* Cached buffers need to be handled before starting the transfer so that
* any cached data is pushed to memory.
*/
if (pchannel_p->direction == DMA_MEM_TO_DEV)
map_direction = DMA_TO_DEVICE;
else
map_direction = DMA_FROM_DEVICE;
pchannel_p->dma_handle = dma_map_single(pchannel_p->dma_device_p,
interface_p->buffer,
interface_p->length,
map_direction);
} else {
/* The physical address of the buffer in the interface is needed for the dma transfer
* as the buffer may not be the first data in the interface
*/
u32 offset = (u32)&interface_p->buffer - (u32)interface_p;
pchannel_p->dma_handle = (dma_addr_t)(pchannel_p->interface_phys_addr + offset);
}
/* Start the DMA transfer and make sure there were not any errors
*/
printk(KERN_INFO "Starting DMA transfers\n");
pchannel_p->cookie = start_transfer(pchannel_p);
if (dma_submit_error(pchannel_p->cookie)) {
printk(KERN_ERR "xdma_prep_buffer error\n");
return;
}
wait_for_transfer(pchannel_p);
}
/* The following functions are designed to test the driver from within the device
* driver without any user space.
*/
#ifdef INTERNAL_TEST
/* Work-queue handler for the internal test: run a blocking DMA transfer on
 * the transmit channel (channels[0]) from its own thread of control, since
 * transfer() blocks until the transaction completes.
 */
static void tx_test(struct work_struct *unused)
{
transfer(&channels[0]);
}
static void test(void)
{
int i;
const int test_size = 1024 * 1024;
struct work_struct work;
/* Initialize the transmit buffer with a pattern and then start
* the seperate thread of control to handle the transmit transfer
* since the functions block waiting for the transfer to complete.
*/
for (i = 0; i <
dma.rar_dma的头文件
版权申诉
199 浏览量
2022-09-24
22:20:08
上传
评论
收藏 6KB RAR 举报
JonSco
- 粉丝: 75
- 资源: 1万+
最新资源
- 基于Java的本科探索学习项目设计源码 - 本科探索
- 基于Javascript和Python的微商城项目设计源码 - MicroMall
- 基于Java的网上订餐系统设计源码 - online ordering system
- 基于Javascript的超级美眉网络资源管理应用模块设计源码
- 基于Typescript和PHP的编程知识储备库设计源码 - study-php
- Screenshot_2024-05-28-11-40-58-177_com.tencent.mm.jpg
- 基于Dart的Flutter小提琴调音器APP设计源码 - violinhelper
- 基于JavaScript和CSS的随寻订购网页设计源码 - web-order
- 基于MATLAB的声纹识别系统设计源码 - VoiceprintRecognition
- 基于Java的微服务插件集合设计源码 - wsy-plugins
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈