/*
* Microsemi Switchtec(tm) PCIe Management Driver
* Copyright (c) 2017, Microsemi Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>
#include <linux/aer.h>
#include "version.h"

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static bool use_dma_mrpc = true;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
		 "Enable the use of the DMA MRPC feature");

static int nirqs = 32;
module_param(nirqs, int, 0644);
MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);
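
/*
 * MRPC (Management RPC) commands are executed by the switch firmware one
 * at a time; each queued command moves through these states.
 */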
enum mrpc_state {
MRPC_IDLE = 0,
MRPC_QUEUED,
MRPC_RUNNING,
MRPC_DONE,
MRPC_IO_ERROR,
};
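
/*
 * Per-user context, allocated for each open of the device file. It tracks
 * the user's in-flight MRPC command, its input/output payload and the
 * number of events the user has already consumed.
 */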
struct switchtec_user {
struct switchtec_dev *stdev;
enum mrpc_state state;
wait_queue_head_t cmd_comp;
struct kref kref;
struct list_head list;
bool cmd_done;
u32 cmd;
u32 status;
u32 return_code;
size_t data_len;
size_t read_len;
unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
int event_cnt;
};
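
/*
 * Check that the device is still reachable by reading the device ID from
 * BAR space and comparing it with the ID cached in the struct pci_dev.
 */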
static int check_access(struct switchtec_dev *stdev)
{
u32 device = ioread32(&stdev->mmio_sys_info->device_id);
return stdev->pdev->device == device;
}
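
/*
 * Allocate a reference-counted user context and take a reference on the
 * switchtec device for as long as the context lives.
 */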
static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
struct switchtec_user *stuser;
stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
if (!stuser)
return ERR_PTR(-ENOMEM);
get_device(&stdev->dev);
stuser->stdev = stdev;
kref_init(&stuser->kref);
INIT_LIST_HEAD(&stuser->list);
init_waitqueue_head(&stuser->cmd_comp);
stuser->event_cnt = atomic_read(&stdev->event_cnt);
dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
return stuser;
}
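
/* kref release callback: drop the device reference and free the context */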
static void stuser_free(struct kref *kref)
{
struct switchtec_user *stuser;
stuser = container_of(kref, struct switchtec_user, kref);
dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
put_device(&stuser->stdev->dev);
kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
enum mrpc_state state)
{
/* requires the mrpc_mutex to already be held when called */
	static const char * const state_names[] = {
[MRPC_IDLE] = "IDLE",
[MRPC_QUEUED] = "QUEUED",
[MRPC_RUNNING] = "RUNNING",
[MRPC_DONE] = "DONE",
[MRPC_IO_ERROR] = "IO_ERROR",
};
stuser->state = state;
dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);
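
/*
 * Reading back a register flushes any write-combined MRPC input data to
 * the hardware. The NTB outbound doorbell register is used because the
 * read has no side effects.
 */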
static void flush_wc_buf(struct switchtec_dev *stdev)
{
struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
SWITCHTEC_NTB_REG_DBMSG_OFFSET;
ioread32(&mmio_dbmsg->odb);
}
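
/*
 * If the hardware is idle, copy the command at the head of the MRPC queue
 * into the input window, ring the command register and arm the 500ms
 * timeout. With DMA MRPC, the status and output buffer are reset first.
 */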
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
struct switchtec_user *stuser;
if (stdev->mrpc_busy)
return;
if (list_empty(&stdev->mrpc_queue))
return;
stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
list);
if (stdev->dma_mrpc) {
stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
}
stuser_set_state(stuser, MRPC_RUNNING);
stdev->mrpc_busy = 1;
memcpy_toio(&stdev->mmio_mrpc->input_data,
stuser->data, stuser->data_len);
flush_wc_buf(stdev);
iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);
schedule_delayed_work(&stdev->mrpc_timeout,
msecs_to_jiffies(500));
}
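
/*
 * Queue a user's command for execution and submit it immediately if the
 * hardware is idle. An extra reference is held on the user context until
 * the command is retired by mrpc_cleanup_cmd().
 */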
static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
/* requires the mrpc_mutex to already be held when called */
struct switchtec_dev *stdev = stuser->stdev;
kref_get(&stuser->kref);
stuser->read_len = sizeof(stuser->data);
stuser_set_state(stuser, MRPC_QUEUED);
stuser->cmd_done = false;
list_add_tail(&stuser->list, &stdev->mrpc_queue);
mrpc_cmd_submit(stdev);
return 0;
}
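
/*
 * Retire the command at the head of the queue: wake any waiter, drop the
 * queueing reference, mark the hardware idle and kick the next command.
 */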
static void mrpc_cleanup_cmd(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
struct switchtec_user *stuser = list_entry(stdev->mrpc_queue.next,
struct switchtec_user, list);
stuser->cmd_done = true;
wake_up_interruptible(&stuser->cmd_comp);
list_del_init(&stuser->list);
stuser_put(stuser);
stdev->mrpc_busy = 0;
mrpc_cmd_submit(stdev);
}
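
/*
 * Collect the status, return code and output data for the command at the
 * head of the queue. If the firmware still reports the command as in
 * progress, leave it queued so a later event or timeout completes it.
 */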
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
struct switchtec_user *stuser;
if (list_empty(&stdev->mrpc_queue))
return;
stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
list);
if (stdev->dma_mrpc)
stuser->status = stdev->dma_mrpc->status;
else
stuser->status = ioread32(&stdev->mmio_mrpc->status);
if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
return;
stuser_set_state(stuser, MRPC_DONE);
stuser->return_code = 0;
if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE &&
stuser->status != SWITCHTEC_MRPC_STATUS_ERROR)
goto out;
if (stdev->dma_mrpc)
stuser->return_code = stdev->dma_mrpc->rtn_code;
else
stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
if (stuser->return_code != 0)
goto out;
if (stdev->dma_mrpc)
memcpy(stuser->data, &stdev->dma_mrpc->data,
stuser->read_len);
else
memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
stuser->read_len);
out:
mrpc_cleanup_cmd(stdev);
}
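
/*
 * Work item scheduled on an MRPC completion event: cancel the pending
 * timeout and complete the command at the head of the queue.
 */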
static void mrpc_event_work(struct work_struct *work)
{
struct switchtec_dev *stdev;
stdev = container_of(work, struct switchtec_dev, mrpc_work);
dev_dbg(&stdev->dev, "%s\n", __func__);
mutex_lock(&stdev->mrpc_mutex);
cancel_delayed_work(&stdev->mrpc_timeout);
mrpc_complete_cmd(stdev);
mutex_unlock(&stdev->mrpc_mutex);
}
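
/*
 * Fail the command at the head of the queue with MRPC_IO_ERROR; used when
 * the device has become inaccessible.
 */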
static void mrpc_error_complete_cmd(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
struct switchtec_user *stuser;
if (list_empty(&stdev->mrpc_queue))
return;
stuser = list_entry(stdev->mrpc_queue.next,
struct switchtec_user, list);
stuser_set_state(stuser, MRPC_IO_ERROR);
mrpc_cleanup_cmd(stdev);
}
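
/*
 * Delayed work that polls the MRPC status while a command is outstanding.
 * If the device can no longer be accessed the command is failed with an
 * I/O error; if it is still in progress the poll is rearmed for another
 * 500ms; otherwise the command is completed.
 */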
static void mrpc_timeout_work(struct work_struct *work)
{
struct switchtec_dev *stdev;
u32 status;
stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);
dev_dbg(&stdev->dev, "%s\n", __func__);
mutex_lock(&stdev->mrpc_mutex);
if (!check_access(stdev)) {
mrpc_error_complete_cmd(stdev);
goto out;
}
if (stdev->dma_mrpc)
status = stdev->dma_mrpc->status;
else
status = ioread32(&stdev->mmio_mrpc->status);
if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
schedule_delayed_work(&stdev->mrpc_timeout,
msecs_to_jiffies(500));
goto out;
}
mrpc_complete_cmd(stdev);
out:
mutex_unlock(&stdev->mrpc_mutex);
}
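
/* sysfs attribute: device version reported in the system info registers */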
static ssize_t device_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct switchtec_dev *stdev = to_stdev(dev);
u32 ver;
ver = ioread32(&stdev->mmio_sys_info->device_version);
return sysfs_em