% Auditory attention decoding via sequential LMMSE: environment setup,
% parameters, data loading, and decoder/MSE initialization.
clc;
clear;
close all;
warning off; % NOTE(review): suppresses ALL warnings globally -- consider scoping to specific warning IDs
addpath(genpath(pwd)); % make local helpers (getData, seq_LMMSE, getMarker) reachable

% --- Parameters ---
fs = 64;                  % EEG sampling rate (Hz)
W = floor(2*fs);          % window length in samples (2 s) % changed by ivine
eff_win_scale = 1;        % window step scale (1 => non-overlapping windows)
Tmax = 375;               % decoder lag (ms)
lag = ceil(Tmax*fs/1000); % decoder lag (samples)
plt_fig = 1;              % 1 => produce figures at the end of the script
count = 1;                % run counter (was dead code: count = 0; count = count + 1;)

% --- Data ---
subject = 'sample_EEG.mat';
data_electrode = 12;
noise_electrode = 21;
attention = [1 1 1 1 2 2]; % per-segment attended speaker (1 or 2)
[s1, s2, eeg, noise, durations] = getData(subject, data_electrode, noise_electrode, Tmax);

% Segment roles: warm-up (decoder/MSE initialization), training, testing.
pre_tr_order = [1 2];
tr_order = [3 6];
te_order = [4 5];
fprintf('MSE_init: [%i %i] Train: [%i %i] Test: [%i %i]\n', pre_tr_order, tr_order, te_order);

% Initial decoders (zeros) and MSE matrices (identity) for sequential LMMSE.
dec_1_init = zeros(lag,1);
dec_2_init = zeros(lag,1);
MSE_1_init = eye(lag);
MSE_2_init = eye(lag);
% Warm-up pass: run the sequential LMMSE over the initialization segments so
% the decoders/MSE matrices move away from their zero/identity starting point.
for seg_id = pre_tr_order
    idx = 1:durations(seg_id);
    eeg_seg   = eeg(idx, seg_id);
    noise_seg = noise(idx, seg_id);
    spk1_seg  = s1(idx, seg_id);
    spk2_seg  = s2(idx, seg_id);
    % Number of full windows that fit in this segment.
    n_win = floor(length(eeg_seg)/(W*eff_win_scale));
    win_start = 1;
    for w = 1:n_win
        win_stop = win_start + W - 1;
        % Zero-mean the EEG and noise-reference windows before the update.
        e_d = eeg_seg(win_start:win_stop);
        e_d = e_d - mean(e_d);
        e_n = noise_seg(win_start:win_stop);
        e_n = e_n - mean(e_n);
        s_1 = spk1_seg(win_start:win_stop);
        s_2 = spk2_seg(win_start:win_stop);
        % Sequential LMMSE update of both speaker decoders.
        [dec_1_init, dec_2_init, MSE_1_init, MSE_2_init] = seq_LMMSE(e_d, e_n, s_1, s_2, ...
            dec_1_init, dec_2_init, MSE_1_init, MSE_2_init, lag);
        win_start = win_start + (W*eff_win_scale);
    end
end
% --- Training pass ---
% Adapt attended/unattended decoders over the training segments and collect one
% attention-marker pair per window for the SVM. The two branches of the original
% code were identical except for which speaker is attended and the marker
% column order; that selection is factored out here.
sub_att_dec = [];
sub_unatt_dec = [];
m1_att_markers = []; % windows where speaker 1 is attended: [attended unattended]
m2_att_markers = []; % windows where speaker 2 is attended: [unattended attended]

% Start training decoders/MSEs from the average of the warm-up results.
att_dec = (dec_1_init + dec_2_init)/2;
unatt_dec = (dec_1_init + dec_2_init)/2;
MSE_a = (MSE_1_init + MSE_2_init)/2;
MSE_u = (MSE_1_init + MSE_2_init)/2;

for seg_id = tr_order
    t = 1:durations(seg_id);
    eeg_train = eeg(t,seg_id);
    noise_train = noise(t,seg_id);
    K = floor(length(eeg_train)/(W*eff_win_scale));
    % Map the segment's attention label to attended/unattended speaker signals.
    if (1 == attention(seg_id))
        speaker_att = s1(t,seg_id);
        speaker_unatt = s2(t,seg_id);
    elseif (2 == attention(seg_id))
        speaker_att = s2(t,seg_id);
        speaker_unatt = s1(t,seg_id);
    else
        continue; % unknown label: skip segment (matches original no-op behavior)
    end
    start_idx = 1;
    for k = 1:K
        stop_idx = start_idx + W-1;
        e_d = eeg_train(start_idx:stop_idx);
        e_d = e_d - mean(e_d);
        e_n = noise_train(start_idx:stop_idx);
        e_n = e_n - mean(e_n);
        s_a = speaker_att(start_idx:stop_idx);
        s_u = speaker_unatt(start_idx:stop_idx);
        % Compute decoder with LMMSE
        [att_dec, unatt_dec, MSE_a, MSE_u] = seq_LMMSE(e_d, e_n, s_a, s_u, att_dec, unatt_dec, MSE_a, MSE_u, lag);
        sub_att_dec = [sub_att_dec att_dec];
        sub_unatt_dec = [sub_unatt_dec unatt_dec];
        % Attention markers: |N1| + |P2| component amplitudes of each decoder.
        tmp_m_a = abs(getMarker(att_dec,1)) + abs(getMarker(att_dec,2));
        tmp_m_u = abs(getMarker(unatt_dec,1)) + abs(getMarker(unatt_dec,2));
        % Column 1 always holds speaker 1's marker, column 2 speaker 2's:
        % the attended marker goes in the attended speaker's column.
        if (1 == attention(seg_id))
            m1_att_markers = [m1_att_markers; [tmp_m_a tmp_m_u]];
        else
            m2_att_markers = [m2_att_markers; [tmp_m_u tmp_m_a]];
        end
        start_idx = start_idx + (W*eff_win_scale);
        clear tmp_m_a tmp_m_u
    end
end
%% SVM
% Train a linear SVM on the training markers: class -1 = speaker 1 attended,
% class +1 = speaker 2 attended. Rows of X are [marker_1 marker_2] pairs.
rng('default')
X = [m1_att_markers; m2_att_markers];
y = [-1*ones(size(m1_att_markers,1), 1); ones(size(m2_att_markers,1), 1)]; % m1 = -1; m2 = +1
% Standardize=true z-scores the two marker features before fitting.
svm_Mdl = fitcsvm(X, y, 'KernelFunction','linear', 'Standardize',true);
% Fit a sigmoid posterior on top of the SVM scores so predict() can return
% class probabilities for the test windows.
ScoreSVMModel = fitPosterior(svm_Mdl, X, y);
% Compute the scores over a grid
d = 0.25; % Step size of the grid
% Grid spans min..2*max of each feature; used by the plotting section below
% to draw the decision boundary.
[x1Grid, x2Grid] = meshgrid(min(X(:,1)): d: 2*max(X(:,1)), min(X(:,2)): d: 2*max(X(:,2)));
xGrid = [x1Grid(:), x2Grid(:)]; % The grid
[~,scores1] = predict(svm_Mdl, xGrid); % The scores
%% Testing
% Run the sequential LMMSE over the held-out segments, collecting one marker
% pair per window; the SVM classifies these pairs below.
% Test decoders restart from the averaged warm-up state (NOT from the trained
% att/unatt decoders), so decoding adapts online during the test segments.
dec_1 = (dec_1_init + dec_2_init)/2;
dec_2 = (dec_1_init + dec_2_init)/2;
MSE_1 = (MSE_1_init + MSE_2_init)/2;
MSE_2 = (MSE_1_init + MSE_2_init)/2;
test_markers = [];
true_label = [];
for seg_id = te_order
    t = 1:durations(seg_id);
    eeg_test = eeg(t,seg_id);
    noise_test = noise(t,seg_id);
    % Number of windows
    K = floor(length(eeg_test)/(W*eff_win_scale));
    % m1 = -1; m2 = +1
    % Maps attention label {1,2} to SVM label {-1,+1}; one label per window.
    tmp_attn = 2*attention(seg_id)-3;
    true_label = [true_label; tmp_attn*ones(K,1)];
    spkr_te_1 = s1(t,seg_id);
    spkr_te_2 = s2(t,seg_id);
    start_idx = 1;
    for k = 1:K
        % NOTE(review): with eff_win_scale == 1 both branches give the same
        % stop index; the distinction only matters for overlapping windows.
        if (1 == k)
            stop_idx = start_idx + W-1;
        else
            stop_idx = start_idx + (W*eff_win_scale)-1;
        end
        % NOTE(review): unlike the warm-up and training loops, e_d and e_n are
        % NOT mean-removed here -- confirm whether this is intentional.
        e_d = eeg_test(start_idx:stop_idx);
        e_n = noise_test(start_idx:stop_idx);
        s_1 = spkr_te_1(start_idx:stop_idx);
        s_2 = spkr_te_2(start_idx:stop_idx);
        % Compute decoder with LMMSE
        [dec_1, dec_2, MSE_1, MSE_2] = seq_LMMSE(e_d, e_n, s_1, s_2, dec_1, dec_2, MSE_1, MSE_2, lag);
        % Compute attention markers (|N1| + |P2| per speaker decoder).
        n1_dec1 = getMarker(dec_1,1);
        p2_dec1 = getMarker(dec_1,2);
        n1_dec2 = getMarker(dec_2,1);
        p2_dec2 = getMarker(dec_2,2);
        tmp_m1 = abs(n1_dec1) + abs(p2_dec1);
        tmp_m2 = abs(n1_dec2) + abs(p2_dec2);
        test_markers = [test_markers; [tmp_m1 tmp_m2]];
        start_idx = start_idx + (W*eff_win_scale);
        clear tmp_m1 tmp_m2
    end
end
%% Evaluation: classify the test-window markers and score the predictions.
[y_test_pred, soft_op] = predict(ScoreSVMModel, test_markers);
% [y_test_pred, soft_op] = predict(svm_Mdl, test_markers); % raw-margin alternative

% Which SVM label counts as the "positive" class for TP/FP bookkeeping depends
% on the speaker attended in the first segment (m1 = -1, m2 = +1).
if attention(1) == 1
    pos = 1;
elseif attention(1) == 2
    pos = -1;
end
pos_mask = (y_test_pred == pos);
neg_mask = (y_test_pred == -pos);
TP = sum(y_test_pred(pos_mask) == true_label(pos_mask));
TN = sum(y_test_pred(neg_mask) == true_label(neg_mask));
FP = sum(y_test_pred(pos_mask) ~= true_label(pos_mask));
FN = sum(y_test_pred(neg_mask) ~= true_label(neg_mask));

precision = TP/(TP+FP);
recall = TP/(TP+FN);
misclassified = (y_test_pred ~= true_label);
acc = 100*sum(y_test_pred == true_label)/ length(true_label);
f1_scores = 100*2*(precision *recall)/(precision + recall);

% Threshold-free accuracy: a window is correct when the sign of the marker
% difference (m2 - m1) agrees with the true label.
marker_margin = -1*(test_markers(:,1) - test_markers(:,2)).*true_label;
acc_th = 100*(sum(marker_margin>0)/length(marker_margin));
clear marker_margin;
%%
% Plotting setup: aliases and cosmetics consumed by the figure code below.
X_test = test_markers;
tot_num_win = size(soft_op,1);
% Time axis (seconds): one tick per test window.
t = linspace(0, tot_num_win*W*eff_win_scale/fs, tot_num_win);
ft_size = 12;   % font size for figure text
ln_width = 1.2; % line width for plots
scale = 2;
% RGB rows: [speaker-1 color; speaker-2 color].
color = [[ 0.114 0.44 1]; [1 0.41 0.153]];
% color = ['r'; 'b'];
if(plt_fig)
figure('pos',[600 50 600 650]);
p
matlab-基于序贯LMMSE进行听觉注意力估计matlab仿真-源码
版权申诉
64 浏览量
2021-09-18
00:17:00
上传
评论
收藏 20.07MB RAR 举报
mYlEaVeiSmVp
- 粉丝: 1880
- 资源: 19万+
最新资源
- 常用工具集参考用于图像等数据处理
- 音乐展示网页、基于Stenography的图像数字水印添加与提取,以及基于颜色矩和Tamura算法的图像相似度评估算法py源码
- 基于EmguCV(OpenCV .net封装),图像数字水印加解密算法的实现,其中包含最低有效位算法,离散傅里叶变换算法+文档书
- 基于matlab+DWT的图像水印项目,数字水印+源代码+文档说明+图片+报告pdf
- (优秀毕业设计)基于python实现的数字图像可视化水印系统的设计与实现,多种数字算法实现+源代码+文档说明+理论演示pdf
- 基于DWT-DCT-SVD和deflate压缩的数字水印方法python源码+Gui界面+演示视频(高分毕业设计)
- 基于matlab实现DWT、DCT、SVD算法数字图像水印可视化系统+GUI界面+文档说明+详细注释(高分毕业设计)
- NCIAE-Data-Structure大一大二笔记
- 学习wireshark笔记
- digital-image-数据可视化笔记
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈