/**
* @file axidma_dma.c
* @date Saturday, November 14, 2015 at 02:28:23 PM EST
* @author Brandon Perez (bmperez)
* @author Jared Choi (jaewonch)
*
* This module contains the interface to the DMA engine for the AXI DMA module.
*
* @bug No known bugs.
**/
// Kernel dependencies
#include <linux/delay.h> // Milliseconds to jiffies conversion
#include <linux/wait.h> // Completion related functions
#include <linux/of_dma.h> // Device-tree based DMA channel lookup
/* <linux/signal.h> was moved to <linux/sched/signal.h> in the 4.11 kernel */
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
#include <linux/sched.h> // Send signal to process function
#else
#include <linux/sched/signal.h> // send_sig_info function
#endif
#include <linux/dmaengine.h> // DMA types and functions
#include <linux/slab.h> // Allocation functions
#include <linux/errno.h> // Linux error codes
#include <linux/platform_device.h> // Platform device definitions
#include <linux/device.h> // Device definitions and functions
/* Between 3.x and 4.x, the path to Xilinx's DMA include file changes. However,
* in some 4.x kernels, the path is still the old one from 3.x. The macro is
* defined by the Makefile, when specified by the user. */
#ifndef XILINX_DMA_INCLUDE_PATH_FIXUP
#include <linux/dma/xilinx_dma.h> // Xilinx DMA config structures
#else
#include <linux/amba/xilinx_dma.h> // Xilinx DMA config structures
#endif
// Local dependencies
#include "axidma.h" // Internal definitions
#include "axidma_ioctl.h" // IOCTL interface definition and types
#include <linux/interrupt.h>
/*----------------------------------------------------------------------------
* Internal Definitions
*----------------------------------------------------------------------------*/
// The default timeout for DMA is 10 seconds
#define AXIDMA_DMA_TIMEOUT 10000
// Tracks completion state of the most recent synchronous read transfer
// (toggled in axidma_dma_callback around the complete() call).
// NOTE(review): written from DMA callback context with no locking or
// atomics, and non-static -- presumably polled from another compilation
// unit; confirm the reader and consider making this atomic.
int read_dma_complete_flag;
// A convenient structure to pass between prep and start transfer functions.
// Describes one DMA/VDMA transfer: its scatter-gather list, how completion
// is reported (kernel completion for synchronous waits, a signal to the
// requesting process for asynchronous ones), and the channel it runs on.
struct axidma_transfer {
int sg_len; // The length of the BD array
struct scatterlist *sg_list; // List of buffer descriptors
bool wait; // Indicates if we should wait
dma_cookie_t cookie; // The DMA cookie for the transfer
struct completion comp; // A completion to use for waiting
enum axidma_dir dir; // The direction of the transfer
enum axidma_type type; // The type of the transfer (VDMA/DMA)
int channel_id; // The ID of the channel
int notify_signal; // The signal to use for async transfers
struct task_struct *process; // The process requesting the transfer
struct axidma_cb_data *cb_data; // The callback data struct
// VDMA specific fields (kept as union for extensibility)
union {
struct axidma_video_frame frame; // Frame information for VDMA
};
};
// The data to pass to the DMA transfer completion callback function.
// Exactly one notification mechanism is used per transfer: when comp is
// non-NULL the callback completes it (synchronous path); otherwise, if
// notify_signal is valid, the callback signals the owning process.
struct axidma_cb_data {
int channel_id; // The id of the channel used
int notify_signal; // For async, signal to send
struct task_struct *process; // The process to send the signal to
struct completion *comp; // For sync, the notification to kernel
};
/*----------------------------------------------------------------------------
* Enumeration Conversions
*----------------------------------------------------------------------------*/
/* Map an AXI DMA direction enumeration to a human-readable string for
 * log messages. Any other value is a driver bug and panics via BUG_ON. */
static char *axidma_dir_to_string(enum axidma_dir dma_dir)
{
    BUG_ON(dma_dir != AXIDMA_WRITE && dma_dir != AXIDMA_READ);

    if (dma_dir == AXIDMA_WRITE) {
        return "transmit";
    }
    return "receive";
}
/* Map an AXI DMA type enumeration to a human-readable string for log
 * messages. Any other value is a driver bug and panics via BUG_ON. */
static char *axidma_type_to_string(enum axidma_type dma_type)
{
    BUG_ON(dma_type != AXIDMA_DMA && dma_type != AXIDMA_VDMA);

    if (dma_type == AXIDMA_DMA) {
        return "DMA";
    }
    return "VDMA";
}
/* Convert the driver's AXI DMA direction enumeration into the generic
 * dmaengine transfer direction. Writes go memory-to-device, reads
 * device-to-memory; anything else is a driver bug (BUG_ON). */
static enum dma_transfer_direction axidma_to_dma_dir(enum axidma_dir dma_dir)
{
    BUG_ON(dma_dir != AXIDMA_WRITE && dma_dir != AXIDMA_READ);

    if (dma_dir == AXIDMA_WRITE) {
        return DMA_MEM_TO_DEV;
    }
    return DMA_DEV_TO_MEM;
}
/*----------------------------------------------------------------------------
* DMA Operations Helper Functions
*----------------------------------------------------------------------------*/
/**
 * Initialize one entry of a scatter-gather list from a user buffer.
 *
 * Translates the user virtual address @buf (of @buf_len bytes) to the DMA
 * address of its backing driver-allocated buffer, and stores the address
 * and length into entry @index of @sg_list.
 *
 * Returns 0 on success, or -EFAULT if @buf does not fall within any
 * previously allocated DMA buffer.
 */
static int axidma_init_sg_entry(struct axidma_device *dev,
        struct scatterlist *sg_list, int index, void *buf, size_t buf_len)
{
    dma_addr_t dma_addr;

    // Get the DMA address from the user virtual address
    dma_addr = axidma_uservirt_to_dma(dev, buf, buf_len);
    /* dma_addr_t is an integer type: compare against 0 rather than a
     * casted null pointer constant. */
    if (dma_addr == 0) {
        axidma_err("Requested transfer address %p does not fall within a "
                   "previously allocated DMA buffer.\n", buf);
        return -EFAULT;
    }

    // Initialize the scatter-gather table entry
    sg_dma_address(&sg_list[index]) = dma_addr;
    sg_dma_len(&sg_list[index]) = buf_len;

    return 0;
}
/* Look up a channel by its ID over the device's channel array. Note that
 * only the ID is compared. Returns NULL when no channel has that ID. */
static struct axidma_chan *axidma_get_chan(struct axidma_device *dev,
                                           int channel_id)
{
    struct axidma_chan *chan;
    int index;

    for (index = 0; index < dev->num_chans; index++) {
        chan = &dev->channels[index];
        if (chan->channel_id == channel_id)
            return chan;
    }

    return NULL;
}
static void axidma_dma_callback(void *data)
{
struct axidma_cb_data *cb_data;
//struct siginfo sig_info;
struct kernel_siginfo sig_info;//lx 20220314
//axidma_err("qc-- enter axidma_dma_callback...\n");
/* For synchronous transfers, notify the kernel thread waiting. For
* asynchronous transfers, send a signal to userspace if requested. */
cb_data = data;
if (cb_data->comp != NULL) {
read_dma_complete_flag = 0;
complete(cb_data->comp);
read_dma_complete_flag = 1;
//axidma_err("qc-- 1111 complete\n");
} else if (VALID_NOTIFY_SIGNAL(cb_data->notify_signal)) {
memset(&sig_info, 0, sizeof(sig_info));
sig_info.si_signo = cb_data->notify_signal;
sig_info.si_code = SI_QUEUE;
sig_info.si_int = cb_data->channel_id;
send_sig_info(cb_data->notify_signal, &sig_info, cb_data->process);
//axidma_err("qc-- 2222 signal\n");
}
}
/* Populate the VDMA channel configuration with the driver's defaults.
 * Everything is zeroed (no frame delay, no genlock, no parking, no delay
 * interrupt, no reset, VDMA synchronizes itself), then the two non-zero
 * settings are applied explicitly. */
static void axidma_setup_vdma_config(struct xilinx_vdma_config *dma_config)
{
    memset(dma_config, 0, sizeof(*dma_config));
    dma_config->frm_cnt_en = 1;     // Interrupt based on frame count
    dma_config->coalesc = 1;        // Interrupt after one frame completion
}
static int axidma_prep_transfer(struct axidma_chan *axidma_chan,
struct axidma_transfer *dma_tfr)
{
struct dma_chan *chan;
struct dma_device *dma_dev;
struct dma_async_tx_descriptor *dma_txnd;
struct completion *dma_comp;
struct xilinx_vdma_config vdma_config;
struct axidma_cb_data *cb_data;
struct dma_interleaved_template dma_template;
enum dma_transfer_direction dma_dir;
enum dma_ctrl_flags dma_flags;
struct scatterlist *sg_list;
int sg_len;
dma_cookie_t dma_cookie;
char *direction, *type;
int rc;
// Get the fields from the structures
chan