classdef helperLidarObjectDetectionDisplay < handle
%helperLidarObjectDetectionDisplay helper class to display segmented
% point cloud data with oriented bounding box visualization.
% Copyright 2019-2020 The MathWorks, Inc.
properties
% Indices used to pick components out of a tracker state vector.
% The exact state layout (which element is x, vx, etc.) is defined by
% the filter/tracker used elsewhere in this example — assumed here,
% TODO confirm against the IMM filter setup.
% Indices of the position components in the state vector
PositionIndex = [1 3 6];
% Indices of the velocity components in the state vector
VelocityIndex = [2 4 7];
% Indices of the cuboid dimensions (length/width/height) in the state vector
DimensionIndex = [9 10 11];
% Index of the yaw (heading) angle in the state vector
YawIndex = 8;
end
properties (Access = private)
% Image handle for visualizing segmented 2D images of organized
% point cloud
SegmentedImageH;
% Axes handle for point cloud visualization
PcPlotAxes;
% Axes handle for point cloud with oriented bounding box
% visualization (top view)
PcPlotBboxAxes;
% Scatter plot handle for visualizing the segmented, colored point
% cloud produced by the network
ScatterPlotH;
% Scatter plot handle for visualizing the point cloud with oriented
% bounding boxes
ScatterPlotWithBboxH;
% Image handle for visualizing the lidar range/intensity image
Lidar2DImageH;
% RGB image axes handle
ImageAxesH;
% Captured figure frames, used when writing the results to a video
PFrames;
% Main GUI figure handle
FigureH;
% Display crop range in the x direction ([min max])
XRange;
% Display crop range in the y direction ([min max])
YRange;
% Display crop range in the z direction ([min max])
ZRange;
% Logical flag: when true, results are recorded to a video output
WriteVideo
end
properties (Constant, Access = protected)
% Vertex indices (rows) for the six faces of a cuboid with vertices
% numbered 1-8. Face labels below are the original author's; the
% exact vertex ordering convention is assumed — TODO confirm against
% the cuboid vertex generation code.
CuboidFaces = [1 5 8 4; ... %far
2 3 7 6; ... %near
5 6 7 8; ... %top
2 1 4 3; ... %bot
3 4 8 7; ... %left
5 6 2 1; ... %right
];
% 3x4 calibration matrix used for projecting 3D points into the
% image; empty by default (no projection)
Calibration=[];
% Default display crop range in the x direction
DefaultXRange = [-30,30]; % x range to crop display
% Default display crop range in the y direction
DefaultYRange =[-12,12];% y range to crop display
% Default display crop range in the z direction
DefaultZRange =[-3,8];
% Default value of the write-video flag
DefaultWriteVideo = false;
end
methods
%--------------------------------------------------------------------------
function obj = helperLidarObjectDetectionDisplay(varargin)
%helperLidarObjectDetectionDisplay Construct the display and lay out
% all panels, axes and graphics primitives used by the update methods.
%
% Optional name-value inputs:
%   'XLimits'     - 1x2 vector, x-axis crop range (default [-30 30])
%   'YLimits'     - 1x2 vector, y-axis crop range (default [-12 12])
%   'ZLimits'     - 1x2 vector, z-axis crop range (default [-3 8])
%   'Calibration' - 3x4 projection matrix (default [])
%   'Video'       - logical, record frames for a video output (default false)

%% Parse inputs
parser = inputParser;
parser.CaseSensitive = false;
% Validators: limits must be 1x2 vectors, the calibration matrix must
% be 3x4, and the video flag must be logical.
% (Renamed from the misleading "validScalarPosNum" — it checks size,
% not positivity.)
validLimits = @(x) (isequal(size(x),[1,2]));
validCalibration = @(x) (isequal(size(x),[3,4]));
validVideo = @(x) (islogical(x));
addOptional(parser,'XLimits',obj.DefaultXRange,validLimits);
addOptional(parser,'YLimits',obj.DefaultYRange,validLimits);
addOptional(parser,'ZLimits',obj.DefaultZRange,validLimits);
addOptional(parser,'Calibration',obj.Calibration,validCalibration);
addOptional(parser,'Video',obj.DefaultWriteVideo,validVideo);
parse(parser,varargin{:});
obj.XRange = parser.Results.XLimits;
obj.YRange = parser.Results.YLimits;
obj.ZRange = parser.Results.ZLimits;
obj.WriteVideo = parser.Results.Video;

%% Main figure (kept invisible until the caller shows it)
obj.FigureH = figure('Visible','off','Position',[0, 0, 1200, 640],...
    'Name','Object Detection','color',[0 0 0],'InvertHardcopy','off');

%% Panel: 3D point cloud with oriented bounding boxes
pointCloudPanel = uipanel('Parent',obj.FigureH,'Position',[0.01,0,0.50,0.58],...
    'BackgroundColor',[0 0 0],'Title','Oriented Bounding Box Detection',...
    'ForegroundColor',[1,1,1],'FontSize',15);
obj.PcPlotAxes = axes('Parent',pointCloudPanel,'Color',[0 0 0],...
    'Position',[0,0,1,1],'NextPlot','replacechildren',...
    'XLim',obj.XRange,'YLim',obj.YRange,'ZLim',obj.ZRange);
axis(obj.PcPlotAxes,'equal');
% Freeze limits so incoming point clouds cannot rescale the view.
obj.PcPlotAxes.XLimMode = 'manual';
obj.PcPlotAxes.YLimMode = 'manual';
obj.PcPlotAxes.ZLimMode = 'manual';
obj.ScatterPlotH = scatter3(obj.PcPlotAxes,nan,nan,nan, 7, '.');
view(obj.PcPlotAxes,3);
% Target the axes explicitly; a bare campos() acts on gca, which is
% fragile when figure focus changes during construction.
campos(obj.PcPlotAxes,[-169.3414 -242.6887 131.6010]);

%% Panel: top view of the point cloud with bounding boxes
pointCloudPanel1 = uipanel('Parent',obj.FigureH,'Position',[0.5,0,0.50,0.58],...
    'BackgroundColor',[0 0 0],'Title','Top View'...
    ,'ForegroundColor',[1,1,1],'FontSize',15);
obj.PcPlotBboxAxes = axes('Parent',pointCloudPanel1,'Color',[0 0 0],...
    'NextPlot','replacechildren','Position' , [0,0,1,1],...
    'XLim',obj.XRange,'YLim',obj.YRange,'ZLim',obj.ZRange);
obj.PcPlotBboxAxes.Title.Color = [1 1 1];
axis(obj.PcPlotBboxAxes,'equal');
obj.PcPlotBboxAxes.XLimMode = 'manual';
obj.PcPlotBboxAxes.YLimMode = 'manual';
obj.PcPlotBboxAxes.ZLimMode = 'manual';
obj.ScatterPlotWithBboxH = scatter3(obj.PcPlotBboxAxes,nan,nan,nan, 7, '.');
view(obj.PcPlotBboxAxes,3);
% Camera directly overhead for the top view.
campos(obj.PcPlotBboxAxes,[0 0 323.5756]);

%% Panel: segmented point cloud image from the network
hFrontView1 = uipanel(obj.FigureH, 'Position', [0.05 0.58 0.90 0.2],...
    'Title','Segmented Image','FontSize',15);
ImageAxes1 = axes('Parent',hFrontView1);
obj.SegmentedImageH = imshow([],'Parent',ImageAxes1,'DisplayRange',[0,70]);
ImageAxes1.NextPlot = 'add';
ImageAxes1.Position = [0,0,1,1];
hFrontView1.BackgroundColor = [0 0 0];
hFrontView1.ForegroundColor = [1 1 1];
axis(ImageAxes1,'tight');

%% Panel: lidar range image
hFrontView2 = uipanel(obj.FigureH, 'Position', [0.05 0.78 0.90 0.2]...
    ,'Title','Lidar Range Image','FontSize',15);
ImageAxes2 = axes('Parent',hFrontView2);
obj.Lidar2DImageH = imshow([],'Parent',ImageAxes2,'DisplayRange',[0,40]);
ImageAxes2.NextPlot = 'add';
ImageAxes2.Position = [0,0,1,1];
hFrontView2.BackgroundColor = [0 0 0];
hFrontView2.ForegroundColor = [1 1 1];
colormap(ImageAxes2,'jet');
axis(ImageAxes2,'tight');
end
%--------------------------------------------------------------------------
function updateSegmentedImage(obj, segmentedImageLabel, lidarImage)
%updateSegmentedImage Refresh the two image panels of the display.
%
% Inputs:
%   segmentedImageLabel - label image produced by the segmentation
%                         network; shown in the "Segmented Image" panel
%   lidarImage          - range (or intensity) image from the lidar;
%                         shown in the "Lidar Range Image" panel
%
% The two assignments are independent: each simply swaps the CData of
% the corresponding image handle created in the constructor.
obj.SegmentedImageH.CData = segmentedImageLabel;
obj.Lidar2DImageH.CData = lidarImage;
end
%--------------------------------------------------------------------------
function updatePointCloud(obj, pc)
%updatePC Method to update display with point cloud.
% Inputs:
% pc pointCloud type object
location = pc.Location;
s = size(
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
此示例演示如何使用安装在 ego 车辆上的激光雷达传感器捕获的激光雷达点云数据来检测、分类和跟踪车辆。此示例中使用的激光雷达数据是从高速公路驾驶方案中记录的。在此示例中,对点云数据进行分段,以确定使用网络的对象类别。具有交互式多模型滤波器的联合概率数据关联(JPDA)跟踪器用于跟踪检测到的车辆。 感知模块在实现具有ADAS系统的车辆的完全自主性方面发挥着重要作用。激光雷达和摄像头是感知工作流程中必不可少的传感器。激光雷达擅长提取物体的精确深度信息,而相机则产生丰富而详细的环境信息,这对于物体分类非常有用。 此示例主要包括以下部分: 接地层分割 语义分割 定向边界框拟合 面向跟踪的边界框 流程图概述了整个系统。
资源推荐
资源详情
资源评论
收起资源包目录
使用激光雷达检测分类跟踪车辆仿真.rar (12个子文件)
使用激光雷达检测分类跟踪车辆仿真
helperLidarModel.m 1KB
helperReweightLayer.p 440B
helperSigmoidLayer.p 248B
helperMultiClassInitIMMFilter.m 5KB
helperLidarObjecDetector.m 10KB
helperResizeLayer.p 342B
helperCvmeasCuboid.m 478B
helperLidarObjectDetectionDisplay.m 15KB
helperCtmeasCuboid.m 476B
helperConstvelCuboid.m 725B
helperConstturnCuboid.m 920B
DetectClassifyAndTrackOrientedBoundingBoxInLidarExample.mlx 113KB
共 12 条
- 1
珞瑜·
- 粉丝: 10w+
- 资源: 500
下载权益
C知道特权
VIP文章
课程特权
开通VIP
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功
- 1
- 2
前往页