/*
* Copyright 2016-2022 The MathWorks, Inc.
*
* File: xcp_daq.c
*
* Abstract:
* Implementation of XCP Protocol Layer DAQ Lists (and STIM) support
*/
#include "xcp_common.h"
#include "xcp.h"
#include "xcp_internal.h"
#include "xcp_cfg.h"
#include "xcp_daq.h"
#ifdef XCP_DAQ_SUPPORT
#include "xcp_daq_ext.h"
#include "xcp_transport_internal.h"
#include "xcp_types.h"
#include "xcp_daq_types.h"
#include "xcp_mem.h"
/*****************************************************************************
Internal Global variables specific to DAQ support
******************************************************************************/
/** Dynamic DAQ Lists data structures */
static XcpDaqLists xcpDynamicDaqLists;
/** Number of event threads currently in execution */
static size_t xcpRunningEventCounter = 0;
#define XCP_INVALID_DAQ_LIST_ID (-1)
/** ID of the DAQ list selected by the START_STOP_DAQ_LIST command */
static int32_T startStopDaqListId = XCP_INVALID_DAQ_LIST_ID;
/** Current DAQ pointer (position of the ODT entry being written), initialized to an invalid value */
static XcpDaqPtr xcpCurrentDaq = { 0xFFFF, 0xFF, 0xFF };
static XcpEventCustomMemoryManager xcpEventCustomMemoryManager = {XCP_DAQ_CUSTOM_MEMORY_INVALID_EVENT_ID, NULL, NULL};
#ifndef XCP_DAQ_LIST_INDEPENDENT_EVENT_LOCK
#ifdef XCP_LOCKLESS_SYNC_DATA_TRANSFER_SUPPORT
/* XCP_LOCKLESS_SYNC_DATA_TRANSFER_SUPPORT is typically enabled
for multi-core applications, where it is critical to limit
the interference between threads streaming data.
For this reason, independent per-event locking of the DAQ list
data structures is enabled by default. */
#define XCP_DAQ_LIST_INDEPENDENT_EVENT_LOCK 1
#else
/* XCP_LOCKLESS_SYNC_DATA_TRANSFER_SUPPORT is typically disabled
for single-core bare-metal applications, where it is critical
to limit the memory footprint of the target executable.
For this reason, per-event locking is disabled by default and a
single mutex protects the DAQ list data structures. */
#define XCP_DAQ_LIST_INDEPENDENT_EVENT_LOCK 0
#endif
#endif
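/* A project can override this default before this file is compiled, for
   example from its XCP configuration header (a sketch; where the macro is
   best defined is project-specific):

       #define XCP_DAQ_LIST_INDEPENDENT_EVENT_LOCK 1

   This forces one mutex per event even when lockless transfer support is
   disabled, trading extra RAM for less cross-event blocking. */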
#if !defined(XCP_DAQ_LIST_INDEPENDENT_EVENT_LOCK) || (XCP_DAQ_LIST_INDEPENDENT_EVENT_LOCK == 0)
/* Single mutex protecting against concurrent access to the DAQ list data structures */
static XCP_MUTEX_DEFINE(daqLock);
static boolean_T xcpDaqWriteLock(XcpEventIdType eventId)
{
boolean_T locked = false;
XCP_UNUSED_PARAM(eventId);
XCP_WRITE_TRY_LOCK(daqLock, xcpRunningEventCounter, locked);
return locked;
}
static void xcpDaqWriteUnlock(XcpEventIdType eventId)
{
XCP_UNUSED_PARAM(eventId);
XCP_WRITE_UNLOCK(daqLock);
}
static void xcpDaqReadLock(XcpEventIdType eventId)
{
XCP_UNUSED_PARAM(eventId);
XCP_READ_LOCK(daqLock, xcpRunningEventCounter);
}
static void xcpDaqReadUnlock(XcpEventIdType eventId)
{
XCP_UNUSED_PARAM(eventId);
XCP_READ_UNLOCK(daqLock, xcpRunningEventCounter);
}
#else
/* Each XCP event has an independent mutex to protect the read/write access
to the DAQ list data structures */
#ifndef XCP_MAX_EVENT_DATA_NUMBER
#define XCP_MAX_EVENT_DATA_NUMBER XCP_MAX_EVENT_CHANNEL
#endif
typedef struct XcpEventData {
XCP_MUTEX_DEFINE(lock);
} XcpEventData;
/** Array of event-specific data */
static XcpEventData xcpEventData[XCP_MAX_EVENT_DATA_NUMBER];
static boolean_T xcpDaqWriteLock(XcpEventIdType eventId)
{
XCP_MUTEX_LOCK(xcpEventData[eventId].lock);
return true;
}
static void xcpDaqWriteUnlock(XcpEventIdType eventId)
{
XCP_MUTEX_UNLOCK(xcpEventData[eventId].lock);
}
static void xcpDaqReadLock(XcpEventIdType eventId)
{
XCP_MUTEX_LOCK(xcpEventData[eventId].lock);
}
static void xcpDaqReadUnlock(XcpEventIdType eventId)
{
XCP_MUTEX_UNLOCK(xcpEventData[eventId].lock);
}
#endif
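/* Both locking strategies expose the same four entry points, so callers do
   not depend on the configuration: xcpDaqWriteLock() is a try-lock whose
   result must be checked (it can return false in the shared-mutex build
   while events are running), while xcpDaqReadLock() has no failure path.
   A minimal usage sketch (the calling code shown here is hypothetical,
   not part of this file):

       if (xcpDaqWriteLock(eventId)) {
           // modify the DAQ list data structures
           xcpDaqWriteUnlock(eventId);
       } else {
           // DAQ lists are busy; retry later
       }

       xcpDaqReadLock(eventId);
       // sample the ODT entries and build DTO packets
       xcpDaqReadUnlock(eventId);
*/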
#ifndef XCP_MIN_EVENT_NO_RESERVED_POOL
#define XCP_MIN_EVENT_NO_RESERVED_POOL 0xFFFF
#endif
/*****************************************************************************
Internal Functions specific to DAQ support
******************************************************************************/
/* A timestamp is only ever inserted in the first ODT of a DAQ list. When
timestamps are not fixed (XCP_TIMESTAMP_FIXED == 0), it is additionally
gated by the TIMESTAMP bit of the DAQ list mode. */
#if XCP_TIMESTAMP_FIXED == 0
#define IS_TIMESTAMP_REQUIRED(mode, odtNumber) ((odtNumber == 0) && \
XCP_READ_BIT_VALUE(mode, XCP_DAQ_MODE_TIMESTAMP_MASK))
#else
#define IS_TIMESTAMP_REQUIRED(mode, odtNumber) (odtNumber == 0)
#endif
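/* For example, with the TIMESTAMP bit set in the DAQ list mode:
       IS_TIMESTAMP_REQUIRED(mode, 0) -> true   (first ODT of the list)
       IS_TIMESTAMP_REQUIRED(mode, 1) -> false  (any later ODT)
   When XCP_TIMESTAMP_FIXED is non-zero the mode bit is ignored and only
   the ODT number decides. */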
/* Pointer increment helper. The byteOffset argument carries a meaningful value
only when emulating a byte-addressable target; otherwise it is always 0. */
#ifdef XCP_EMULATE_BYTE_ADDRESSABLE_TARGET
#define XCP_HW_PTR_INCREMENT(ptr, incr, byteOffset) (uint8_T *)(byteOffset ? (ptr + XCP_IN_HW_AG(incr - 1)) : (ptr + XCP_IN_HW_AG(incr)))
#else
#define XCP_HW_PTR_INCREMENT(ptr, incr, byteOffset) (uint8_T *)(ptr + incr)
#endif
/* Update byteOffset when emulating a byte-addressable target */
#ifdef XCP_EMULATE_BYTE_ADDRESSABLE_TARGET
#define XCP_HW_BYTE_OFFSET_UPDATE(offset, size) ((offset + size) % XCP_HARDWARE_ADDRESS_GRANULARITY_BYTES_NUMBER)
#else
#define XCP_HW_BYTE_OFFSET_UPDATE(offset, size) 0
#endif
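/* Illustrative walk-through (a sketch, assuming a word-addressable target
   with XCP_HARDWARE_ADDRESS_GRANULARITY_BYTES_NUMBER == 2 and XCP_IN_HW_AG()
   converting a byte count into hardware addresses): after copying a 3-byte
   ODT entry that starts word-aligned,
       byteOffset = XCP_HW_BYTE_OFFSET_UPDATE(0, 3) = (0 + 3) % 2 = 1,
   so the next XCP_HW_PTR_INCREMENT(ptr, incr, 1) discards the byte already
   consumed from the current word (incr - 1) before converting to hardware
   addresses, keeping the pointer consistent with the byte stream. */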
#define IS_USING_CUSTOM_POOL(x) ((x) == xcpEventCustomMemoryManager.eventId)
static boolean_T isActiveDaqList(uint16_T daqListId)
{
return (xcpDynamicDaqLists.daq[daqListId].status == XCP_DAQ_SELECTED) ||
(xcpDynamicDaqLists.daq[daqListId].status == XCP_DAQ_STARTED);
}
static boolean_T noActiveDaqLists(void)
{
boolean_T found = false;
uint16_T i = 0;
for (i = 0; (i < xcpDynamicDaqLists.daqCount) && !found; i++) {
found = isActiveDaqList(i);
}
return !found;
}
static boolean_T isValidDaqPtr(uint16_T daqListId, uint8_T odtId, uint8_T odtEntryId)
{
boolean_T ret = (xcpDynamicDaqLists.daq != NULL) &&
(daqListId < xcpDynamicDaqLists.daqCount) &&
(xcpDynamicDaqLists.daq[daqListId].odt != NULL) &&
(odtId < xcpDynamicDaqLists.daq[daqListId].odtCount) &&
(xcpDynamicDaqLists.daq[daqListId].odt[odtId].entry != NULL) &&
(odtEntryId < xcpDynamicDaqLists.daq[daqListId].odt[odtId].entriesCount);
return ret;
}
static boolean_T isValidDaqEntry(uint8_T bitOffset, uint8_T size, uint32_T address)
{
boolean_T ret = false;
uint16_T entrySize = size; /* cast to 16-bit avoids -Werror=type-limits */
if (bitOffset == 0xFF) {
/* "Normal" (non-bitwise) access has been selected */
if ( (entrySize <= XCP_MAX_ODT_ENTRY_SIZE) &&
(entrySize % (XCP_ODT_ENTRY_SIZE_GRANULARITY/XCP_ADDRESS_GRANULARITY_BYTES_NUMBER) == 0) &&
( address % (XCP_ODT_ENTRY_SIZE_GRANULARITY/XCP_ADDRESS_GRANULARITY_BYTES_NUMBER) == 0) ) {
ret = true;
}
}
else {
/* Bitwise access has been selected */
if ((bitOffset <= 0x1F) && (address % XCP_ODT_ENTRY_SIZE_GRANULARITY) &&
(entrySize == XCP_ODT_ENTRY_SIZE_GRANULARITY)) {
ret = true;
}
}
return ret;
}
/** The currently supported DAQ list modes are:
- alternating = 0
- direction = 0 (DAQ only)
- timestamp = 1 or 0 (must be 1 when XCP_TIMESTAMP_FIXED is enabled)
- PID OFF = 0
*/
static boolean_T isSupportedDaqListMode(uint8_T mode)
{
#if XCP_TIMESTAMP_FIXED == 0
return ( (mode & ~XCP_DAQ_MODE_TIMESTAMP_MASK) == 0 );
#else
return (mode == XCP_DAQ_MODE_TIMESTAMP_MASK);
#endif
}
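/* For example, assuming the standard XCP SET_DAQ_LIST_MODE bit layout
   (TIMESTAMP = 0x10, DIRECTION = 0x02) for XCP_DAQ_MODE_TIMESTAMP_MASK:
   modes 0x00 (plain DAQ) and 0x10 (DAQ with timestamp) are accepted when
   timestamps are not fixed; with fixed timestamps only 0x10 is accepted;
   mode 0x02 (STIM direction) is always rejected. The bit values are an
   assumption, not taken from this file. */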
static boolean_T isValidStartStopMode(uint8_T mode)
{
boolean_T ret = false;
if ((mode == XCP_DAQ_LIST_STOP) ||
(mode == XCP_DAQ_LIST_START) ||
(mode == XCP_DAQ_LIST_SELECT))
ret = true;
return ret;
}
static boolean_T isValidStartStopSynchMode(uint8_T mode)
{
boolean_T ret = false;
if ((mode == XCP_DAQ_LIST_STOP_ALL) ||
(mode == XCP_DAQ_LIST_START_SELECTED) ||
(mode == XCP_DAQ_LIST_STOP_SELECTED))
ret = true;
return ret;
}
static boolean_T absoluteOdtNumberIdentification(void)
{
return (XCP_ID_FIELD_TYPE == XCP_ID_ABSOLUTE_ODT_NUMBER);
}
static boolean_T isDaqPackedModeEnabled(uint16_